comparison piecrust/pipelines/asset.py @ 854:08e02c2a2a1a

core: Keep refactoring, this time to prepare for generator sources.

- Make a few APIs simpler.
- Content pipelines create their own jobs, so that generator sources can
  keep aborting in `getContents`, but rely on their pipeline to generate
  pages for baking.
author Ludovic Chabant <ludovic@chabant.com>
date Sun, 04 Jun 2017 23:34:28 -0700
parents f070a4fc033c
children 45ad976712ec
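In short, the pipeline context stops being threaded through every call and moves into the constructor: `__init__` now takes a `ppctx` argument, `initialize()` and `shutdown()` lose their `ctx` parameter, and `run()` receives a job object instead of a raw content item. A minimal sketch of the new shape, where `PipelineContext` and `Job` are illustrative stand-ins rather than PieCrust's real types:

    # Illustrative sketch only: PipelineContext and Job are assumed
    # stand-ins; the method signatures mirror the changeset below.

    class PipelineContext:
        def __init__(self, out_dir, force=False):
            self.out_dir = out_dir   # where processed assets get written
            self.force = force       # when True, re-process everything


    class Job:
        def __init__(self, content_item):
            self.content_item = content_item   # run() reads job.content_item.spec


    class PipelineSketch:
        # Before: __init__(self, source). After: the context comes in
        # once, at construction time, and is kept around as self.ctx.
        def __init__(self, source, ppctx):
            self.source = source
            self.ctx = ppctx

        def initialize(self):
            # No ctx argument any more; read self.ctx.out_dir, self.ctx.force.
            pass

        def run(self, job, ctx, result):
            # The pipeline is handed a job it created itself, so a generator
            # source can abort in getContents() without starving the bake.
            pass

        def shutdown(self):
            pass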
diff -r f070a4fc033c -r 08e02c2a2a1a piecrust/pipelines/asset.py
--- a/piecrust/pipelines/asset.py	853:f070a4fc033c
+++ b/piecrust/pipelines/asset.py	854:08e02c2a2a1a
@@ -17,32 +17,32 @@
 
 class AssetPipeline(ContentPipeline):
     PIPELINE_NAME = 'asset'
     RECORD_ENTRY_CLASS = AssetPipelineRecordEntry
 
-    def __init__(self, source):
+    def __init__(self, source, ppctx):
         if not isinstance(source, FSContentSourceBase):
             raise Exception(
                 "The asset pipeline only support file-system sources.")
 
-        super().__init__(source)
+        super().__init__(source, ppctx)
         self.enabled_processors = None
         self.ignore_patterns = []
         self._processors = None
         self._base_dir = source.fs_endpoint_path
 
-    def initialize(self, ctx):
+    def initialize(self):
         # Get the list of processors for this run.
         processors = self.app.plugin_loader.getProcessors()
         if self.enabled_processors is not None:
             logger.debug("Filtering processors to: %s" %
                          self.enabled_processors)
             processors = get_filtered_processors(processors,
                                                  self.enabled_processors)
 
         # Invoke pre-processors.
-        proc_ctx = ProcessorContext(self, ctx)
+        proc_ctx = ProcessorContext(self)
         for proc in processors:
             proc.onPipelineStart(proc_ctx)
 
         # Add any extra processors registered in the `onPipelineStart` step.
         processors += proc_ctx.extra_processors
@@ -60,41 +60,42 @@
         # Register timers.
         stats = self.app.env.stats
         stats.registerTimer('BuildProcessingTree', raise_if_registered=False)
         stats.registerTimer('RunProcessingTree', raise_if_registered=False)
 
-    def run(self, content_item, ctx, result):
+    def run(self, job, ctx, result):
         # See if we need to ignore this item.
-        rel_path = os.path.relpath(content_item.spec, self._base_dir)
+        rel_path = os.path.relpath(job.content_item.spec, self._base_dir)
         if re_matchany(rel_path, self.ignore_patterns):
             return
 
         record_entry = result.record_entry
         stats = self.app.env.stats
+        out_dir = self.ctx.out_dir
 
         # Build the processing tree for this job.
         with stats.timerScope('BuildProcessingTree'):
             builder = ProcessingTreeBuilder(self._processors)
             tree_root = builder.build(rel_path)
             record_entry.flags |= AssetPipelineRecordEntry.FLAG_PREPARED
 
         # Prepare and run the tree.
         print_node(tree_root, recursive=True)
         leaves = tree_root.getLeaves()
-        record_entry.out_paths = [os.path.join(ctx.out_dir, l.path)
+        record_entry.out_paths = [os.path.join(out_dir, l.path)
                                   for l in leaves]
         record_entry.proc_tree = get_node_name_tree(tree_root)
         if tree_root.getProcessor().is_bypassing_structured_processing:
             record_entry.flags |= (
                 AssetPipelineRecordEntry.FLAG_BYPASSED_STRUCTURED_PROCESSING)
 
-        if ctx.force:
+        if self.ctx.force:
             tree_root.setState(STATE_DIRTY, True)
 
         with stats.timerScope('RunProcessingTree'):
             runner = ProcessingTreeRunner(
-                self._base_dir, self.tmp_dir, ctx.out_dir)
+                self._base_dir, self.tmp_dir, out_dir)
             if runner.processSubTree(tree_root):
                 record_entry.flags |= (
                     AssetPipelineRecordEntry.FLAG_PROCESSED)
 
     def getDeletions(self, ctx):
@@ -116,13 +117,13 @@
             (prev.flags & ~AssetPipelineRecordEntry.FLAG_PROCESSED) |
             AssetPipelineRecordEntry.FLAG_COLLAPSED_FROM_LAST_RUN)
         cur.out_paths = list(prev.out_paths)
         cur.errors = list(prev.errors)
 
-    def shutdown(self, ctx):
+    def shutdown(self):
         # Invoke post-processors.
-        proc_ctx = ProcessorContext(self, ctx)
+        proc_ctx = ProcessorContext(self)
         for proc in self._processors:
             proc.onPipelineEnd(proc_ctx)
 
 
 split_processor_names_re = re.compile(r'[ ,]+')
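Seen from the calling side, the new lifecycle reads roughly as follows. The driver function and job loop here are hypothetical; only the call order and signatures (ppctx in the constructor, argument-less `initialize()`/`shutdown()`, job-based `run()`) come from the changeset above:

    # Hypothetical driver sketch; bake_source and the surrounding loop
    # are invented for illustration, not part of PieCrust's API.
    def bake_source(source, ppctx, jobs, run_ctx, results):
        pipeline = AssetPipeline(source, ppctx)     # ppctx is now required
        pipeline.initialize()                       # was initialize(ctx)
        try:
            for job, result in zip(jobs, results):
                pipeline.run(job, run_ctx, result)  # a job, not a content_item
        finally:
            pipeline.shutdown()                     # was shutdown(ctx)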