piecrust2: comparison of piecrust/baking/baker.py @ 871:504ddb370df8
refactor: Fixing some issues with baking assets.
author:   Ludovic Chabant <ludovic@chabant.com>
date:     Tue, 13 Jun 2017 22:30:27 -0700
parents:  8d25f76fce98
children: d1095774bfcf
Comparing 870:48d25fd68b8d with 871:504ddb370df8:
```diff
@@ -9,11 +9,11 @@
     PipelineJobCreateContext, PipelineMergeRecordContext, PipelineManager,
     get_pipeline_name_for_source)
 from piecrust.pipelines.records import (
     MultiRecordHistory, MultiRecord, RecordEntry,
     load_records)
-from piecrust.sources.base import REALM_USER, REALM_THEME
+from piecrust.sources.base import REALM_USER, REALM_THEME, REALM_NAMES
 
 
 logger = logging.getLogger(__name__)
 
 
```
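The newly imported `REALM_NAMES` maps the realm constants to display names, which `_bakeRealm` below lowercases for its log messages. A minimal sketch of that usage, with assumed constant values (the diff only shows that these names exist in `piecrust.sources.base`):

```python
# Hypothetical stand-ins for the piecrust.sources.base constants; only
# their existence, not their values, is visible in this changeset.
REALM_USER = 0
REALM_THEME = 1
REALM_NAMES = {REALM_USER: 'User', REALM_THEME: 'Theme'}

for realm in (REALM_USER, REALM_THEME):
    realm_name = REALM_NAMES[realm].lower()  # same pattern as _bakeRealm
    print("baking realm: %s" % realm_name)
```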
```diff
@@ -40,11 +40,11 @@
         logger.debug(" Bake Output: %s" % self.out_dir)
         logger.debug(" Root URL: %s" % self.app.config.get('site/root'))
 
         # Get into bake mode.
         self.app.config.set('baker/is_baking', True)
-        self.app.config.set('site/base_asset_url_format', '%uri')
+        self.app.config.set('site/asset_url_format', '%page_uri%/%filename%')
 
         # Make sure the output directory exists.
         if not os.path.isdir(self.out_dir):
             os.makedirs(self.out_dir, 0o755)
 
```
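In bake mode the app now overrides `site/asset_url_format` with explicit `%page_uri%` and `%filename%` placeholders instead of the old `site/base_asset_url_format` and its single `%uri` token. A rough sketch of what such a substitution could produce; the `expand_asset_url` helper and the sample values are illustrative assumptions, not PieCrust's actual implementation:

```python
def expand_asset_url(fmt, page_uri, filename):
    # Hypothetical expansion of the '%page_uri%/%filename%' format; the
    # real substitution logic lives elsewhere in PieCrust.
    return (fmt
            .replace('%page_uri%', page_uri.rstrip('/'))
            .replace('%filename%', filename))

# An asset 'diagram.png' belonging to the page '/blog/my-post':
print(expand_asset_url('%page_uri%/%filename%',
                       '/blog/my-post', 'diagram.png'))
# -> /blog/my-post/diagram.png
```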
```diff
@@ -117,17 +117,18 @@
                 ppinfo.pipeline.PASS_NUM, {})
             pplist = pp_by_realm.setdefault(
                 ppinfo.pipeline.source.config['realm'], [])
             pplist.append(ppinfo)
 
-        for pp_pass in sorted(pp_by_pass_and_realm.keys()):
-            logger.debug("Pipelines pass %d" % pp_pass)
-            pp_by_realm = pp_by_pass_and_realm[pp_pass]
+        for pp_pass_num in sorted(pp_by_pass_and_realm.keys()):
+            logger.debug("Pipelines pass %d" % pp_pass_num)
+            pp_by_realm = pp_by_pass_and_realm[pp_pass_num]
             for realm in realm_list:
                 pplist = pp_by_realm.get(realm)
                 if pplist is not None:
-                    self._bakeRealm(pool, pp_pass, record_histories, pplist)
+                    self._bakeRealm(
+                        pool, record_histories, pp_pass_num, realm, pplist)
 
         # Handle deletions, collapse records, etc.
         ppmngr.postJobRun()
         ppmngr.deleteStaleOutputs()
         ppmngr.collapseRecords()
```
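For context, the loop above this hunk buckets each pipeline first by its pass number and then by its source's realm, so that within every pass the realms can be baked in a fixed order. A simplified, self-contained sketch of that two-level `setdefault` grouping, with plain dictionaries standing in for the real `ppinfo` objects:

```python
# Toy stand-ins for the real pipeline info objects.
pipelines = [
    {'name': 'pages', 'pass_num': 0, 'realm': 0},        # e.g. REALM_USER
    {'name': 'theme_pages', 'pass_num': 0, 'realm': 1},  # e.g. REALM_THEME
    {'name': 'assets', 'pass_num': 1, 'realm': 0},
]

pp_by_pass_and_realm = {}
for ppinfo in pipelines:
    pp_by_realm = pp_by_pass_and_realm.setdefault(ppinfo['pass_num'], {})
    pplist = pp_by_realm.setdefault(ppinfo['realm'], [])
    pplist.append(ppinfo)

# Passes run in sorted order; realms in a fixed order within each pass.
for pp_pass_num in sorted(pp_by_pass_and_realm.keys()):
    pp_by_realm = pp_by_pass_and_realm[pp_pass_num]
    for realm in (0, 1):
        pplist = pp_by_realm.get(realm)
        if pplist is not None:
            print(pp_pass_num, realm, [p['name'] for p in pplist])
```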
```diff
@@ -211,63 +212,79 @@
             current_records.incremental_count += 1
             logger.debug(format_timed(
                 start_time, "cache is assumed valid", colored=False))
             return True
 
-    def _bakeRealm(self, pool, pppass, record_histories, pplist):
+    def _bakeRealm(self, pool, record_histories, pp_pass_num, realm, pplist):
         # Start with the first pass, where we iterate on the content sources'
         # items and run jobs on those.
         pool.userdata.cur_pass = 0
         next_pass_jobs = {}
         pool.userdata.next_pass_jobs = next_pass_jobs
-        queued_any_job = False
+
+        start_time = time.perf_counter()
+        job_count = 0
+        realm_name = REALM_NAMES[realm].lower()
+
         for ppinfo in pplist:
             src = ppinfo.source
             pp = ppinfo.pipeline
 
             logger.debug(
-                "Queuing jobs for source '%s' using pipeline '%s' (pass 0)." %
-                (src.name, pp.PIPELINE_NAME))
+                "Queuing jobs for source '%s' using pipeline '%s' "
+                "(%s, pass 0)." %
+                (src.name, pp.PIPELINE_NAME, realm_name))
 
             next_pass_jobs[src.name] = []
-            jcctx = PipelineJobCreateContext(pppass, record_histories)
+            jcctx = PipelineJobCreateContext(pp_pass_num, record_histories)
             jobs = pp.createJobs(jcctx)
             if jobs is not None:
+                job_count += len(jobs)
                 pool.queueJobs(jobs)
-                queued_any_job = True
 
-        if not queued_any_job:
+        if job_count == 0:
             logger.debug("No jobs queued! Bailing out of this bake pass.")
             return
 
         pool.wait()
+
+        logger.info(format_timed(
+            start_time, "%d pipeline jobs completed (%s, pass 0)." %
+            (job_count, realm_name)))
 
         # Now let's see if any job created a follow-up job. Let's keep
         # processing those jobs as long as they create new ones.
         pool.userdata.cur_pass = 1
         while True:
-            had_any_job = False
-
             # Make a copy of our next pass jobs and reset the list, so
             # the first jobs to be processed don't mess it up as we're
             # still iterating on it.
             next_pass_jobs = pool.userdata.next_pass_jobs
             pool.userdata.next_pass_jobs = {}
 
+            start_time = time.perf_counter()
+            job_count = 0
+
             for sn, jobs in next_pass_jobs.items():
                 if jobs:
                     logger.debug(
-                        "Queuing jobs for source '%s' (pass %d)." %
-                        (sn, pool.userdata.cur_pass))
+                        "Queuing jobs for source '%s' (%s, pass %d)." %
+                        (sn, realm_name, pool.userdata.cur_pass))
+                    job_count += len(jobs)
                     pool.userdata.next_pass_jobs[sn] = []
                     pool.queueJobs(jobs)
-                    had_any_job = True
 
-            if not had_any_job:
+            if job_count == 0:
                 break
 
             pool.wait()
+
+            logger.info(format_timed(
+                start_time,
+                "%d pipeline jobs completed (%s, pass %d)." %
+                (job_count, realm_name, pool.userdata.cur_pass)))
+
             pool.userdata.cur_pass += 1
 
     def _logErrors(self, item_spec, errors):
         logger.error("Errors found in %s:" % item_spec)
         for e in errors:
```
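Taken together, the refactored `_bakeRealm` follows a queue/wait/repeat shape: queue the pass-0 jobs, wait on the pool, then keep draining follow-up jobs until a pass produces none, now counting and timing jobs per pass instead of tracking a boolean flag. A self-contained sketch of that loop shape, with a synchronous stand-in for PieCrust's worker pool:

```python
import time

def run_passes(initial_jobs, make_followups):
    """Queue/wait/repeat loop in the shape of the refactored _bakeRealm.

    `make_followups` stands in for jobs that can create follow-up jobs;
    the synchronous loop below stands in for pool.queueJobs()/pool.wait().
    """
    jobs = list(initial_jobs)
    cur_pass = 0
    while True:
        job_count = len(jobs)
        if job_count == 0:
            break  # mirrors the new `if job_count == 0` bail-out

        start_time = time.perf_counter()
        next_jobs = []
        for job in jobs:
            next_jobs.extend(make_followups(job, cur_pass))

        elapsed = time.perf_counter() - start_time
        print("%d pipeline jobs completed (pass %d) in %.3fs" %
              (job_count, cur_pass, elapsed))
        jobs = next_jobs
        cur_pass += 1

# Example: each pass-0 job spawns one follow-up; later passes spawn none.
run_passes(['render page A', 'render page B'],
           lambda job, p: [job + ' (followup)'] if p == 0 else [])
```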