piecrust2: comparison of piecrust/baking/baker.py @ 39:2f717f961996
Better error reporting and cache validation.
Fix the processor pipeline in the preview server.
Move the `pages` route to first position.
| author | Ludovic Chabant <ludovic@chabant.com> |
|---|---|
| date | Thu, 21 Aug 2014 22:28:22 -0700 |
| parents | 485682a6de50 |
| children | 9e058e221108 |
38:091f99bfbe44 | 39:2f717f961996 |
---|---|
6 import logging | 6 import logging |
7 import threading | 7 import threading |
8 import urllib.request, urllib.error, urllib.parse | 8 import urllib.request, urllib.error, urllib.parse |
9 from queue import Queue, Empty | 9 from queue import Queue, Empty |
10 from piecrust.baking.records import TransitionalBakeRecord, BakeRecordPageEntry | 10 from piecrust.baking.records import TransitionalBakeRecord, BakeRecordPageEntry |
11 from piecrust.chefutil import format_timed | 11 from piecrust.chefutil import format_timed, log_friendly_exception |
12 from piecrust.data.filters import (PaginationFilter, HasFilterClause, | 12 from piecrust.data.filters import (PaginationFilter, HasFilterClause, |
13 IsFilterClause, AndBooleanClause) | 13 IsFilterClause, AndBooleanClause) |
14 from piecrust.processing.base import ProcessorPipeline | 14 from piecrust.processing.base import ProcessorPipeline |
15 from piecrust.rendering import PageRenderingContext, render_page | 15 from piecrust.rendering import PageRenderingContext, render_page |
16 from piecrust.sources.base import (PageFactory, | 16 from piecrust.sources.base import (PageFactory, |
155 try: | 155 try: |
156 logger.debug(" p%d -> %s" % (cur_sub, out_path)) | 156 logger.debug(" p%d -> %s" % (cur_sub, out_path)) |
157 ctx, rp = self._bakeSingle(page, sub_uri, cur_sub, out_path, | 157 ctx, rp = self._bakeSingle(page, sub_uri, cur_sub, out_path, |
158 pagination_filter, custom_data) | 158 pagination_filter, custom_data) |
159 except Exception as ex: | 159 except Exception as ex: |
160 raise Exception("Error baking page '%s' for URI '%s'." % | 160 raise Exception("Error baking page '%s' for URL '%s'." % |
161 (page.ref_spec, uri)) from ex | 161 (page.ref_spec, uri)) from ex |
162 | 162 |
163 # Copy page assets. | 163 # Copy page assets. |
164 if (cur_sub == 1 and self.copy_assets and | 164 if (cur_sub == 1 and self.copy_assets and |
165 ctx.used_assets is not None): | 165 ctx.used_assets is not None): |
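The `raise ... from ex` in the hunk above wraps whatever went wrong while baking one page in a higher-level error carrying the page's ref spec and URL, while keeping the original exception attached as `__cause__` so its traceback still prints in full. A minimal illustration of the pattern (the page values and the failing stand-in are made up):

```python
def bake_page(ref_spec, uri):
    # Stand-in for the real per-page bake; fails so we can see the chain.
    raise ValueError("template 'post.html' not found")

ref_spec, uri = 'posts/2014-08-21_example.md', '/blog/example'
try:
    bake_page(ref_spec, uri)
except Exception as ex:
    # The original ValueError prints first, followed by "The above
    # exception was the direct cause of the following exception:".
    raise Exception("Error baking page '%s' for URL '%s'." %
                    (ref_spec, uri)) from ex
```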
261 t = time.clock() | 261 t = time.clock() |
262 record.loadPrevious(record_cache.getCachePath(record_name)) | 262 record.loadPrevious(record_cache.getCachePath(record_name)) |
263 logger.debug(format_timed(t, 'loaded previous bake record', | 263 logger.debug(format_timed(t, 'loaded previous bake record', |
264 colored=False)) | 264 colored=False)) |
265 | 265 |
| 266 # Figure out if we need to clean the cache because important things |
| 267 # have changed. |
| 268 self._handleCacheValidity(record) |
| 269 |
266 # Gather all sources by realm -- we're going to bake each realm | 270 # Gather all sources by realm -- we're going to bake each realm |
267 # separately so we can handle "overlaying" (i.e. one realm overrides | 271 # separately so we can handle "overlaying" (i.e. one realm overrides |
268 # another realm's pages). | 272 # another realm's pages). |
269 sources_by_realm = {} | 273 sources_by_realm = {} |
270 for source in self.app.sources: | 274 for source in self.app.sources: |
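The comment above is the reason realms exist as a grouping key: baking realm by realm lets pages in one realm (say, the user's site) overwrite the output of the same pages in another (say, a theme). The grouping itself is ordinary dict bucketing, roughly like this runnable sketch, where `Source` and the realm values are stand-ins for the baker's real source objects:

```python
from collections import namedtuple

Source = namedtuple('Source', ['name', 'realm'])

# Stand-ins: realm 0 = user site, realm 1 = theme.
sources = [Source('pages', 0), Source('posts', 0), Source('theme_pages', 1)]

# Group sources by realm, then bake realm by realm so one realm's
# pages can overwrite ("overlay") another realm's output.
sources_by_realm = {}
for source in sources:
    sources_by_realm.setdefault(source.realm, []).append(source)

for realm, srclist in sorted(sources_by_realm.items()):
    print('baking realm %d: %s' % (realm, [s.name for s in srclist]))
```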
285 if not self.no_assets: | 289 if not self.no_assets: |
286 self._bakeAssets(record) | 290 self._bakeAssets(record) |
287 | 291 |
288 # Save the bake record. | 292 # Save the bake record. |
289 t = time.clock() | 293 t = time.clock() |
| 294 record.current.bake_time = time.time() |
290 record.collapseRecords() | 295 record.collapseRecords() |
291 record.saveCurrent(record_cache.getCachePath(record_name)) | 296 record.saveCurrent(record_cache.getCachePath(record_name)) |
292 logger.debug(format_timed(t, 'saved bake record', colored=False)) | 297 logger.debug(format_timed(t, 'saved bake record', colored=False)) |
293 | 298 |
294 # All done. | 299 # All done. |
295 self.app.config.set('baker/is_baking', False) | 300 self.app.config.set('baker/is_baking', False) |
296 logger.info('-------------------------') | 301 logger.info('-------------------------') |
297 logger.info(format_timed(start_time, 'done baking')) | 302 logger.info(format_timed(start_time, 'done baking')) |
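`format_timed` comes from `piecrust.chefutil` and its body is not part of this diff; judging only from the call sites here (a start time, a message, an optional `colored` flag), a rough stand-in could look like the sketch below. Note the code times with `time.clock()`, which Python later deprecated and removed in 3.8; `time.perf_counter()` is the modern equivalent used in the sketch.

```python
import time

def format_timed(start_time, message, colored=True):
    # Rough stand-in for piecrust.chefutil.format_timed, inferred from
    # its call sites; the real helper may format (and colorize) its
    # output differently. `colored` is accepted but ignored here.
    elapsed = time.perf_counter() - start_time
    return '[%8.2f ms] %s' % (elapsed * 1000.0, message)

t = time.perf_counter()
sum(range(1000000))  # some work to time
print(format_timed(t, 'loaded previous bake record', colored=False))
```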
| 303 |
| 304 def _handleCacheValidity(self, record): |
| 305 start_time = time.clock() |
| 306 |
| 307 reason = None |
| 308 if self.force: |
| 309 reason = "ordered to" |
| 310 elif not self.app.config.get('__cache_valid'): |
| 311 # The configuration file was changed, or we're running a new |
| 312 # version of the app. |
| 313 reason = "not valid anymore" |
| 314 elif not record.previous.bake_time: |
| 315 # We have no valid previous bake record. |
| 316 reason = "need bake record regeneration" |
| 317 else: |
| 318 # Check if any template has changed since the last bake. Since |
| 319 # there could be some advanced conditional logic going on, we'd |
| 320 # better just force a bake from scratch if that's the case. |
| 321 max_time = 0 |
| 322 for d in self.app.templates_dirs: |
| 323 for dirpath, _, filenames in os.walk(d): |
| 324 for fn in filenames: |
| 325 max_time = max(max_time, os.path.getmtime(os.path.join(dirpath, fn))) |
| 326 if max_time >= record.previous.bake_time: |
| 327 reason = "templates modified" |
| 328 |
| 329 if reason is not None: |
| 330 cache_dir = self.app.cache_dir |
| 331 if os.path.isdir(cache_dir): |
| 332 logger.debug("Cleaning cache: %s" % cache_dir) |
| 333 shutil.rmtree(cache_dir) |
| 334 self.force = True |
| 335 logger.info(format_timed(start_time, |
| 336 "cleaned cache (reason: %s)" % reason)) |
| 337 else: |
| 338 logger.debug(format_timed(start_time, "cache is assumed valid", |
| 339 colored=False)) |
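The new `_handleCacheValidity` method's last resort is a filesystem scan: if any file under a templates directory has a modification time at or after the previous bake, the whole cache is wiped and a full rebake is forced. A minimal, self-contained sketch of that check (the `templates` directory name and the one-hour-ago bake time are made up for the demo):

```python
import os
import time

def newest_template_mtime(template_dirs):
    """Return the most recent mtime of any file under the given dirs."""
    max_time = 0
    for d in template_dirs:
        for dirpath, _, filenames in os.walk(d):
            for fn in filenames:
                # Join with dirpath: getmtime() needs a full path, not
                # the bare file name that os.walk() yields.
                max_time = max(max_time,
                               os.path.getmtime(os.path.join(dirpath, fn)))
    return max_time

last_bake_time = time.time() - 3600  # pretend the last bake was an hour ago
if newest_template_mtime(['templates']) >= last_bake_time:
    print("templates modified -> clean the cache and force a full bake")
```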
298 | 340 |
299 def _bakeRealm(self, record, realm, srclist): | 341 def _bakeRealm(self, record, realm, srclist): |
300 # Gather all page factories from the sources and queue them | 342 # Gather all page factories from the sources and queue them |
301 # for the workers to pick up. Just skip taxonomy pages for now. | 343 # for the workers to pick up. Just skip taxonomy pages for now. |
302 logger.debug("Baking realm %s" % REALM_NAMES[realm]) | 344 logger.debug("Baking realm %s" % REALM_NAMES[realm]) |
433 | 475 |
434 def _waitOnWorkerPool(self, pool, abort): | 476 def _waitOnWorkerPool(self, pool, abort): |
435 for w in pool: | 477 for w in pool: |
436 w.join() | 478 w.join() |
437 if abort.is_set(): | 479 if abort.is_set(): |
438 raise Exception("Worker pool was aborted.") | 480 excs = [w.abort_exception for w in pool |
| 481 if w.abort_exception is not None] |
| 482 logger.error("%s errors" % len(excs)) |
| 483 if self.app.debug: |
| 484 for e in excs: |
| 485 logger.exception(e) |
| 486 else: |
| 487 for e in excs: |
| 488 log_friendly_exception(logger, e) |
| 489 raise Exception("Baking was aborted due to errors.") |
439 | 490 |
440 | 491 |
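Before this change, an aborted pool raised a bare "Worker pool was aborted." and each worker logged its own failure as it happened; now each `BakeWorker` stashes its exception in `abort_exception` (see `run()` below) and `_waitOnWorkerPool` reports the collected errors once, after every thread has joined. A simplified, runnable sketch of the pattern, with stand-in names rather than the actual baker classes:

```python
import threading
from queue import Queue, Empty

class Worker(threading.Thread):
    def __init__(self, jobs, abort):
        super().__init__()
        self.jobs = jobs
        self.abort = abort
        self.abort_exception = None   # set if this worker dies

    def run(self):
        while not self.abort.is_set():
            try:
                job = self.jobs.get(True, 0.1)
            except Empty:
                return                # no more work
            try:
                job()                 # the actual work
                self.jobs.task_done()
            except Exception as ex:
                self.abort_exception = ex
                self.abort.set()      # tell the other workers to stop
                break

def wait_on_pool(pool, abort):
    for w in pool:
        w.join()
    if abort.is_set():
        # Gather every captured exception and report them together.
        excs = [w.abort_exception for w in pool
                if w.abort_exception is not None]
        for e in excs:
            print("worker error: %r" % e)
        raise Exception("Baking was aborted due to errors.")

jobs = Queue()
jobs.put(lambda: 1 / 0)               # a job that fails
abort = threading.Event()
pool = [Worker(jobs, abort) for _ in range(2)]
for w in pool:
    w.start()
wait_on_pool(pool, abort)
```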
441 class BakeWorkerContext(object): | 492 class BakeWorkerContext(object): |
442 def __init__(self, app, out_dir, force, record, work_queue, | 493 def __init__(self, app, out_dir, force, record, work_queue, |
443 abort_event): | 494 abort_event): |
461 return self.factory.source | 512 return self.factory.source |
462 | 513 |
463 | 514 |
464 class BakeWorker(threading.Thread): | 515 class BakeWorker(threading.Thread): |
465 def __init__(self, wid, ctx): | 516 def __init__(self, wid, ctx): |
466 super(BakeWorker, self).__init__() | 517 super(BakeWorker, self).__init__(name=('worker%d' % wid)) |
467 self.wid = wid | 518 self.wid = wid |
468 self.ctx = ctx | 519 self.ctx = ctx |
469 self.num_bakes = 0 | 520 self.abort_exception = None |
470 self._page_baker = PageBaker(ctx.app, ctx.out_dir, ctx.force, | 521 self._page_baker = PageBaker(ctx.app, ctx.out_dir, ctx.force, |
471 ctx.record) | 522 ctx.record) |
472 | 523 |
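Passing `name=('worker%d' % wid)` to `Thread.__init__` is what makes the worker names show up anywhere the logging configuration includes `%(threadName)s`, keeping interleaved worker output attributable. A small demonstration (the format string is an arbitrary choice for the demo, not piecrust's):

```python
import logging
import threading

logging.basicConfig(level=logging.DEBUG,
                    format='%(threadName)s: %(message)s')

def work():
    logging.debug('baking a page')  # prints as e.g. "worker0: baking a page"

threads = [threading.Thread(target=work, name='worker%d' % i)
           for i in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```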
473 def run(self): | 524 def run(self): |
474 while(not self.ctx.abort_event.is_set()): | 525 while(not self.ctx.abort_event.is_set()): |
482 self._unsafeRun(job) | 533 self._unsafeRun(job) |
483 logger.debug("[%d] Done with page." % self.wid) | 534 logger.debug("[%d] Done with page." % self.wid) |
484 self.ctx.work_queue.task_done() | 535 self.ctx.work_queue.task_done() |
485 except Exception as ex: | 536 except Exception as ex: |
486 self.ctx.abort_event.set() | 537 self.ctx.abort_event.set() |
487 logger.error("[%d] Critical error, aborting." % self.wid) | 538 self.abort_exception = ex |
488 logger.exception(ex) | 539 logger.debug("[%d] Critical error, aborting." % self.wid) |
489 break | 540 break |
490 | 541 |
491 def _unsafeRun(self, job): | 542 def _unsafeRun(self, job): |
492 start_time = time.clock() | 543 start_time = time.clock() |
493 | 544 |
501 friendly_count = '' | 552 friendly_count = '' |
502 if bake_res.num_subs > 1: | 553 if bake_res.num_subs > 1: |
503 friendly_count = ' (%d pages)' % bake_res.num_subs | 554 friendly_count = ' (%d pages)' % bake_res.num_subs |
504 logger.info(format_timed(start_time, '[%d] %s%s' % | 555 logger.info(format_timed(start_time, '[%d] %s%s' % |
505 (self.wid, friendly_uri, friendly_count))) | 556 (self.wid, friendly_uri, friendly_count))) |
506 self.num_bakes += 1 | 557 |
507 | |