piecrust2: changeset 158:1187739e5a19
Fix some indentation and line lengths.
author:    Ludovic Chabant <ludovic@chabant.com>
date:      Wed, 31 Dec 2014 16:56:55 -0800
parents:   55910ab4bfea
children:  232989a6df36
files:     piecrust/baking/baker.py piecrust/baking/scheduler.py piecrust/data/base.py piecrust/environment.py piecrust/rendering.py
diffstat:  5 files changed, 66 insertions(+), 43 deletions(-)
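Every hunk below applies the same wrapping convention: when a call would run past the line-length limit, the line now breaks immediately after the opening parenthesis and all arguments move to indented continuation lines, instead of leaving the first argument on the call line and aligning the rest under it. A minimal before/after sketch of the style (illustrative values, not code from this changeset):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

source_name, ref_spec = 'posts', 'posts:2014/hello.md'  # illustrative values

# Old style: the first argument rides on the call line, forcing deep
# alignment and overlong lines.
logger.debug("Skipping taxonomy page: %s:%s" %
             (source_name, ref_spec))

# New style applied by this changeset: break right after the opening
# parenthesis and give the arguments a plain hanging indent.
logger.debug(
        "Skipping taxonomy page: %s:%s" %
        (source_name, ref_spec))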
--- a/piecrust/baking/baker.py	Sat Dec 27 18:17:30 2014 -0800
+++ b/piecrust/baking/baker.py	Wed Dec 31 16:56:55 2014 -0800
@@ -4,12 +4,13 @@
 import hashlib
 import logging
 import threading
-from piecrust.baking.records import (TransitionalBakeRecord,
-        BakeRecordPageEntry)
+from piecrust.baking.records import (
+        TransitionalBakeRecord, BakeRecordPageEntry)
 from piecrust.baking.scheduler import BakeScheduler
 from piecrust.baking.single import (BakingError, PageBaker)
 from piecrust.chefutil import format_timed, log_friendly_exception
-from piecrust.sources.base import (PageFactory,
+from piecrust.sources.base import (
+        PageFactory,
         REALM_NAMES, REALM_USER, REALM_THEME)
 
 
@@ -18,7 +19,7 @@
 
 class Baker(object):
     def __init__(self, app, out_dir, force=False, portable=False,
-            no_assets=False, num_workers=4):
+                 no_assets=False, num_workers=4):
         assert app and out_dir
         self.app = app
         self.out_dir = out_dir
@@ -60,8 +61,9 @@
         if not self.force and record_cache.has(record_name):
             t = time.clock()
             record.loadPrevious(record_cache.getCachePath(record_name))
-            logger.debug(format_timed(t, 'loaded previous bake record',
-                colored=False));
+            logger.debug(format_timed(
+                    t, 'loaded previous bake record',
+                    colored=False))
 
         # Figure out if we need to clean the cache because important things
         # have changed.
@@ -138,12 +140,14 @@
             self.force = True
             record.incremental_count = 0
             record.clearPrevious()
-            logger.info(format_timed(start_time,
-                "cleaned cache (reason: %s)" % reason))
+            logger.info(format_timed(
+                    start_time,
+                    "cleaned cache (reason: %s)" % reason))
         else:
             record.incremental_count += 1
-            logger.debug(format_timed(start_time, "cache is assumed valid",
-                colored=False))
+            logger.debug(format_timed(
+                    start_time, "cache is assumed valid",
+                    colored=False))
 
     def _bakeRealm(self, record, realm, srclist):
         # Gather all page factories from the sources and queue them
@@ -155,7 +159,8 @@
             factories = source.getPageFactories()
             for fac in factories:
                 if fac.path in self.taxonomy_pages:
-                    logger.debug("Skipping taxonomy page: %s:%s" %
+                    logger.debug(
+                            "Skipping taxonomy page: %s:%s" %
                             (source.name, fac.ref_spec))
                     continue
 
@@ -164,8 +169,8 @@
 
                 route = self.app.getRoute(source.name, fac.metadata)
                 if route is None:
-                    entry.errors.append("Can't get route for page: %s" %
-                            fac.ref_spec)
+                    entry.errors.append(
+                            "Can't get route for page: %s" % fac.ref_spec)
                     logger.error(entry.errors[-1])
                     continue
 
@@ -238,25 +243,30 @@
                 if len(terms) == 0:
                     continue
 
-                logger.debug("Baking '%s' for source '%s': %s" %
+                logger.debug(
+                        "Baking '%s' for source '%s': %s" %
                         (tax_name, source_name, terms))
                 tax = self.app.getTaxonomy(tax_name)
                 route = self.app.getTaxonomyRoute(tax_name, source_name)
                 tax_page_ref = tax.getPageRef(source_name)
                 if not tax_page_ref.exists:
-                    logger.debug("No taxonomy page found at '%s', skipping." %
+                    logger.debug(
+                            "No taxonomy page found at '%s', skipping." %
                             tax.page_ref)
                     continue
 
                 tax_page_source = tax_page_ref.source
                 tax_page_rel_path = tax_page_ref.rel_path
-                logger.debug("Using taxonomy page: %s:%s" %
+                logger.debug(
+                        "Using taxonomy page: %s:%s" %
                         (tax_page_source.name, tax_page_rel_path))
 
                 for term in terms:
-                    fac = PageFactory(tax_page_source, tax_page_rel_path,
+                    fac = PageFactory(
+                            tax_page_source, tax_page_rel_path,
                             {tax.term_name: term})
-                    logger.debug("Queuing: %s [%s, %s]" %
+                    logger.debug(
+                            "Queuing: %s [%s, %s]" %
                             (fac.ref_spec, tax_name, term))
                     entry = BakeRecordPageEntry(fac, tax_name, term)
                     record.addEntry(entry)
@@ -281,7 +291,8 @@
         queue = BakeScheduler(record)
         abort = threading.Event()
         for i in range(pool_size):
-            ctx = BakeWorkerContext(self.app, self.out_dir, self.force,
+            ctx = BakeWorkerContext(
+                    self.app, self.out_dir, self.force,
                     record, queue, abort)
             worker = BakeWorker(i, ctx)
             pool.append(worker)
@@ -307,7 +318,7 @@
 
 class BakeWorkerContext(object):
     def __init__(self, app, out_dir, force, record, work_queue,
-            abort_event):
+                 abort_event):
         self.app = app
         self.out_dir = out_dir
         self.force = force
@@ -318,7 +329,7 @@
 
 class BakeWorkerJob(object):
     def __init__(self, factory, route, record_entry,
-            taxonomy_name=None, taxonomy_term=None):
+                 taxonomy_name=None, taxonomy_term=None):
         self.factory = factory
         self.route = route
         self.record_entry = record_entry
@@ -336,7 +347,8 @@
         self.wid = wid
         self.ctx = ctx
         self.abort_exception = None
-        self._page_baker = PageBaker(ctx.app, ctx.out_dir, ctx.force,
+        self._page_baker = PageBaker(
+                ctx.app, ctx.out_dir, ctx.force,
                 ctx.record)
 
     def run(self):
@@ -344,10 +356,10 @@
             try:
                 job = self.ctx.work_queue.getNextJob(wait_timeout=1)
                 if job is None:
-                    logger.debug("[%d] No more work... shutting down." %
+                    logger.debug(
+                            "[%d] No more work... shutting down." %
                             self.wid)
                     break
-
                 self._unsafeRun(job)
                 logger.debug("[%d] Done with page." % self.wid)
                 self.ctx.work_queue.onJobFinished(job)
@@ -364,7 +376,8 @@
 
         entry = job.record_entry
         try:
-            self._page_baker.bake(job.factory, job.route, entry,
+            self._page_baker.bake(
+                    job.factory, job.route, entry,
                     taxonomy_name=job.taxonomy_name,
                     taxonomy_term=job.taxonomy_term)
         except BakingError as ex:
@@ -379,7 +392,8 @@
             friendly_count = ''
             if entry.num_subs > 1:
                 friendly_count = ' (%d pages)' % entry.num_subs
-            logger.info(format_timed(start_time, '[%d] %s%s' %
+            logger.info(format_timed(
+                    start_time, '[%d] %s%s' %
                     (self.wid, friendly_uri, friendly_count)))
         elif entry.errors:
             for e in entry.errors:
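As an aside, the worker loop touched in the @@ -344 hunk shows the pattern baker.py uses for its thread pool: each worker polls a shared queue with a short timeout so it can regularly check for shutdown. A stand-alone sketch of that pattern using only the standard library (hypothetical job values; PieCrust's real code uses its own BakeScheduler instead of queue.Queue):

import queue
import threading

def worker(wid, jobs, abort):
    # Poll with a timeout so the abort event is checked regularly,
    # mirroring BakeWorker.run()'s getNextJob(wait_timeout=1) loop.
    while not abort.is_set():
        try:
            job = jobs.get(timeout=1)
        except queue.Empty:
            continue
        print("[%d] baked %s" % (wid, job))
        jobs.task_done()

jobs = queue.Queue()
abort = threading.Event()
workers = [threading.Thread(target=worker, args=(i, jobs, abort))
           for i in range(4)]
for t in workers:
    t.start()
for path in ('pages/about.md', 'posts/hello.md'):  # hypothetical jobs
    jobs.put(path)
jobs.join()        # wait until every queued job is marked done
abort.set()        # then tell the workers to shut down
for t in workers:
    t.join()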
--- a/piecrust/baking/scheduler.py	Sat Dec 27 18:17:30 2014 -0800
+++ b/piecrust/baking/scheduler.py	Wed Dec 31 16:56:55 2014 -0800
@@ -19,14 +19,14 @@
 
     def addJob(self, job):
         logger.debug("Queuing job '%s:%s'." % (
-            job.factory.source.name, job.factory.rel_path))
+                job.factory.source.name, job.factory.rel_path))
         with self._lock:
            self.jobs.append(job)
         self._added_event.set()
 
     def onJobFinished(self, job):
         logger.debug("Removing job '%s:%s'." % (
-            job.factory.source.name, job.factory.rel_path))
+                job.factory.source.name, job.factory.rel_path))
         with self._lock:
             self._active_jobs.remove(job)
         self._done_event.set()
@@ -75,13 +75,15 @@
                 self.jobs.append(job)
             return self._WAIT
 
-        logger.debug("Job '%s:%s' is ready to go, moving to active "
-                "queue." % (job.factory.source.name, job.factory.rel_path))
+        logger.debug(
+                "Job '%s:%s' is ready to go, moving to active queue." %
+                (job.factory.source.name, job.factory.rel_path))
         self._active_jobs.append(job)
         return job
 
     def _isJobReady(self, job):
-        e = self.record.getPreviousEntry(job.factory.source.name,
+        e = self.record.getPreviousEntry(
+                job.factory.source.name,
                 job.factory.rel_path)
         if not e:
             return (True, None)
@@ -91,7 +93,7 @@
             if any(filter(lambda j: j.factory.source.name == sn, self.jobs)):
                 return (False, sn)
             if any(filter(lambda j: j.factory.source.name == sn,
-                    self._active_jobs)):
+                          self._active_jobs)):
                 return (False, sn)
         return (True, None)
 
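A note on the readiness check in the last hunk: `any(filter(lambda j: ..., jobs))` returns True as soon as one queued job matches. An equivalent form passes a generator expression to any(), avoiding the lambda; a tiny illustration with made-up job objects (the real code tests `j.factory.source.name`):

from collections import namedtuple

# Made-up stand-in for the scheduler's job objects.
Job = namedtuple('Job', 'source_name')
jobs = [Job('pages'), Job('posts')]
sn = 'posts'

print(any(filter(lambda j: j.source_name == sn, jobs)))  # style used above
print(any(j.source_name == sn for j in jobs))            # equivalent form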
--- a/piecrust/data/base.py	Sat Dec 27 18:17:30 2014 -0800
+++ b/piecrust/data/base.py	Wed Dec 31 16:56:55 2014 -0800
@@ -62,8 +62,9 @@
             try:
                 self._values[name] = loader(self, name)
             except Exception as ex:
-                raise Exception("Error while loading attribute '%s' for: %s"
-                        % (name, self._page.rel_path)) from ex
+                raise Exception(
+                        "Error while loading attribute '%s' for: %s" %
+                        (name, self._page.rel_path)) from ex
 
             # We need to double-check `_loaders` here because
             # the loader could have removed all loaders, which
@@ -90,8 +91,8 @@
         if self._loaders is None:
             self._loaders = {}
         if attr_name in self._loaders:
-            raise Exception("A loader has already been mapped for: %s" %
-                    attr_name)
+            raise Exception(
+                    "A loader has already been mapped for: %s" % attr_name)
         self._loaders[attr_name] = loader
 
     def _load(self):
@@ -101,7 +102,9 @@
         try:
             self._loadCustom()
         except Exception as ex:
-            raise Exception("Error while loading data for: %s" % self._page.rel_path) from ex
+            raise Exception(
+                    "Error while loading data for: %s" %
+                    self._page.rel_path) from ex
 
     def _loadCustom(self):
         pass
@@ -129,7 +132,8 @@
         page_url = self._get_uri()
         self.setValue('url', page_url)
         self.setValue('slug', get_slug(self._page.app, page_url))
-        self.setValue('timestamp',
+        self.setValue(
+                'timestamp',
                 time.mktime(self.page.datetime.timetuple()))
         date_format = self.page.app.config.get('site/date_format')
         if date_format:
@@ -156,12 +160,13 @@
         if do_render:
             uri = self._get_uri()
             try:
-                from piecrust.rendering import (PageRenderingContext,
-                        render_page_segments)
+                from piecrust.rendering import (
+                        PageRenderingContext, render_page_segments)
                 ctx = PageRenderingContext(self._page, uri)
                 segs = render_page_segments(ctx)
             except Exception as e:
-                raise Exception("Error rendering segments for '%s'" % uri) from e
+                raise Exception(
+                        "Error rendering segments for '%s'" % uri) from e
         else:
             segs = {}
             for name in self.page.config.get('segments'):
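The `raise ... from ex` idiom rewrapped throughout this file chains the original exception onto the new, higher-level one, so tracebacks show both the low-level failure and which page triggered it. A self-contained example of the same idiom (hypothetical loader and path):

def load_attribute(name, rel_path):
    # Hypothetical loader that fails; stands in for loader(self, name).
    try:
        raise KeyError(name)
    except Exception as ex:
        raise Exception(
                "Error while loading attribute '%s' for: %s" %
                (name, rel_path)) from ex

try:
    load_attribute('tags', 'posts/2014/hello.md')
except Exception as ex:
    print(repr(ex.__cause__))  # the chained original: KeyError('tags')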
--- a/piecrust/environment.py	Sat Dec 27 18:17:30 2014 -0800
+++ b/piecrust/environment.py	Wed Dec 31 16:56:55 2014 -0800
@@ -60,7 +60,8 @@
 
                 logger.debug("'%s' found in file-system cache." % key)
                 item_raw = self.fs_cache.read(fs_key)
-                item = json.loads(item_raw,
+                item = json.loads(
+                        item_raw,
                         object_pairs_hook=collections.OrderedDict)
                 self.cache.put(key, item)
                 return item
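The `object_pairs_hook=collections.OrderedDict` argument being rewrapped here tells json.loads() to build each JSON object from its key/value pairs in document order, which matters on Python versions whose plain dicts do not preserve insertion order. A quick demonstration:

import collections
import json

raw = '{"title": "Hello", "layout": "post", "draft": false}'
item = json.loads(raw, object_pairs_hook=collections.OrderedDict)
print(list(item.keys()))  # keys come back in document order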
--- a/piecrust/rendering.py	Sat Dec 27 18:17:30 2014 -0800
+++ b/piecrust/rendering.py	Wed Dec 31 16:56:55 2014 -0800
@@ -99,7 +99,8 @@
     if repo:
         cache_key = '%s:%s' % (ctx.uri, ctx.page_num)
         page_time = page.path_mtime
-        contents = repo.get(cache_key,
+        contents = repo.get(
+                cache_key,
                 lambda: _do_render_page_segments(page, page_data),
                 fs_cache_time=page_time)
     else:
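The `repo.get(key, factory, fs_cache_time=...)` call being rewrapped is a lazy, mtime-aware cache lookup: the lambda runs only when the key is missing or the cached copy is older than the page file. A minimal sketch of that pattern with a hypothetical cache class (not PieCrust's actual repository implementation):

class LazyCache:
    # Hypothetical sketch, not PieCrust's real cache repository.
    def __init__(self):
        self._items = {}  # key -> (timestamp, value)

    def get(self, key, item_maker, fs_cache_time=0):
        cached = self._items.get(key)
        # Reuse the cached value only if it is at least as fresh as
        # the source file's modification time.
        if cached is not None and cached[0] >= fs_cache_time:
            return cached[1]
        value = item_maker()  # the factory runs only on a miss
        self._items[key] = (fs_cache_time, value)
        return value

repo = LazyCache()
contents = repo.get(
        'about:1',
        lambda: '<p>rendered segments</p>',  # stand-in for the renderer
        fs_cache_time=1419724650.0)
print(contents)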