comparison piecrust/baking/baker.py @ 855:448710d84121

refactor: Get the taxonomy support back to a functional state. There's now a taxonomy content source that wraps another normal content source, such as a blog's posts source. It works in tandem with a taxonomy content pipeline that does the heavy lifting of figuring out which terms exist and need to be baked.
author Ludovic Chabant <ludovic@chabant.com>
date Tue, 06 Jun 2017 00:26:21 -0700
parents 08e02c2a2a1a
children 8d25f76fce98
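The commit message describes the new design only in prose: a taxonomy content source wraps a normal source (such as the blog's posts) and a companion pipeline works out which terms need baking. As a rough illustration of that wrapping idea, here is a minimal, self-contained sketch; every name in it (InnerSource, WrappingTaxonomySource, the 'tags' field) is a hypothetical stand-in, not the classes this changeset actually adds.

    # Hypothetical sketch of a source that wraps another source and derives
    # taxonomy terms from its items -- illustrative only, not PieCrust code.

    class InnerSource:
        """Stand-in for a normal content source, e.g. blog posts."""
        def __init__(self, items):
            self._items = items

        def getAllContents(self):
            return iter(self._items)


    class WrappingTaxonomySource:
        """Wraps an inner source and groups its items by taxonomy term."""
        def __init__(self, inner, term_field='tags'):
            self.inner = inner
            self.term_field = term_field

        def getTerms(self):
            terms = {}
            for item in self.inner.getAllContents():
                for term in item.get(self.term_field, []):
                    terms.setdefault(term, []).append(item)
            return terms


    if __name__ == '__main__':
        posts = InnerSource([
            {'title': 'first post', 'tags': ['python', 'web']},
            {'title': 'second post', 'tags': ['python']},
        ])
        for term, items in WrappingTaxonomySource(posts).getTerms().items():
            print(term, '->', len(items), 'item(s)')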
--- a/piecrust/baking/baker.py    854:08e02c2a2a1a
+++ b/piecrust/baking/baker.py    855:448710d84121
@@ -4,11 +4,11 @@
 import logging
 from piecrust.chefutil import (
     format_timed_scope, format_timed)
 from piecrust.environment import ExecutionStats
 from piecrust.pipelines.base import (
-    PipelineMergeRecordContext, PipelineManager,
+    PipelineJobCreateContext, PipelineMergeRecordContext, PipelineManager,
     get_pipeline_name_for_source)
 from piecrust.pipelines.records import (
     MultiRecordHistory, MultiRecord, RecordEntry,
     load_records)
 from piecrust.sources.base import REALM_USER, REALM_THEME
@@ -101,11 +101,11 @@
             raise Exception("The website has no content sources, or the bake "
                             "command was invoked with all pipelines filtered "
                             "out. There's nothing to do.")
 
         # Create the worker processes.
-        pool_userdata = _PoolUserData(self, ppmngr, current_records)
+        pool_userdata = _PoolUserData(self, ppmngr)
         pool = self._createWorkerPool(records_path, pool_userdata)
         realm_list = [REALM_USER, REALM_THEME]
 
         # Bake the realms -- user first, theme second, so that a user item
         # can override a theme item.
@@ -122,14 +122,14 @@
             logger.debug("Pipelines pass %d" % pp_pass)
             pp_by_realm = pp_by_pass_and_realm[pp_pass]
             for realm in realm_list:
                 pplist = pp_by_realm.get(realm)
                 if pplist is not None:
-                    self._bakeRealm(pool, pplist)
+                    self._bakeRealm(pool, pp_pass, record_histories, pplist)
 
         # Handle deletions, collapse records, etc.
-        ppmngr.buildHistoryDiffs()
+        ppmngr.postJobRun()
         ppmngr.deleteStaleOutputs()
         ppmngr.collapseRecords()
 
         # All done with the workers. Close the pool and get reports.
         pool_stats = pool.close()
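The comment in the hunk above gives the rationale for the realm ordering ("user first, theme second, so that a user item can override a theme item"). One plausible toy reading of that rule, with entirely hypothetical names and a first-claim-wins policy assumed purely for illustration:

    # Toy illustration only (assumed override policy, not PieCrust's code):
    # baking the user realm first lets a user page claim an output path
    # before the theme page with the same path gets a chance to.

    USER, THEME = 'user', 'theme'

    def bake_realms(items_by_realm):
        baked = {}
        for realm in (USER, THEME):
            for out_path, content in items_by_realm.get(realm, []):
                baked.setdefault(out_path, content)   # first claim wins
        return baked

    if __name__ == '__main__':
        items = {
            USER: [('about.html', 'my custom about page')],
            THEME: [('about.html', 'theme default about page'),
                    ('archive.html', 'theme archive page')],
        }
        print(bake_realms(items))
        # about.html keeps the user version; archive.html falls back to the theme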
@@ -210,27 +210,36 @@
             current_records.incremental_count += 1
             logger.debug(format_timed(
                 start_time, "cache is assumed valid", colored=False))
             return True
 
-    def _bakeRealm(self, pool, pplist):
+    def _bakeRealm(self, pool, pppass, record_histories, pplist):
         # Start with the first pass, where we iterate on the content sources'
         # items and run jobs on those.
         pool.userdata.cur_pass = 0
         next_pass_jobs = {}
         pool.userdata.next_pass_jobs = next_pass_jobs
+        queued_any_job = False
         for ppinfo in pplist:
             src = ppinfo.source
             pp = ppinfo.pipeline
 
             logger.debug(
                 "Queuing jobs for source '%s' using pipeline '%s' (pass 0)." %
                 (src.name, pp.PIPELINE_NAME))
 
             next_pass_jobs[src.name] = []
-            jobs = pp.createJobs()
-            pool.queueJobs(jobs)
+            jcctx = PipelineJobCreateContext(pppass, record_histories)
+            jobs = pp.createJobs(jcctx)
+            if jobs is not None:
+                pool.queueJobs(jobs)
+                queued_any_job = True
+
+        if not queued_any_job:
+            logger.debug("No jobs queued! Bailing out of this bake pass.")
+            return
+
         pool.wait()
 
         # Now let's see if any job created a follow-up job. Let's keep
         # processing those jobs as long as they create new ones.
         pool.userdata.cur_pass = 1
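In the hunk above, createJobs now receives a PipelineJobCreateContext and may return None, which the baker treats as "nothing to queue for this pass". A small self-contained sketch of that contract follows; apart from the createJobs name and the idea of a context carrying the pass number and record histories, everything here (class names, attributes) is an assumption, not PieCrust's actual pipeline API.

    # Illustrative toy classes only -- hypothetical names and attributes.

    class ToyJobCreateContext:
        def __init__(self, pass_num, record_histories=None):
            self.pass_num = pass_num
            self.record_histories = record_histories

    class ToyPipeline:
        def __init__(self, items):
            self.items = items

        def createJobs(self, ctx):
            # A pipeline can opt out of a pass entirely by returning None
            # instead of an empty list; the caller then skips queueing.
            if not self.items:
                return None
            return [{'item': item, 'pass': ctx.pass_num} for item in self.items]

    if __name__ == '__main__':
        print(ToyPipeline(['a.md', 'b.md']).createJobs(ToyJobCreateContext(0)))
        print(ToyPipeline([]).createJobs(ToyJobCreateContext(0)))   # None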
@@ -330,11 +339,11 @@
         if self.app.debug:
             logger.error(exc_data.traceback)
 
 
 class _PoolUserData:
-    def __init__(self, baker, ppmngr, current_records):
+    def __init__(self, baker, ppmngr):
         self.baker = baker
         self.ppmngr = ppmngr
-        self.records = current_records
+        self.records = ppmngr.record_histories.current
         self.cur_pass = 0
         self.next_pass_jobs = {}