piecrust2: changeset 1144:9f3e702a8a69
bake: Give unique source specs to each taxonomy or blog archive page.
This prevents caching issues in some situations that led to one tag page reusing the data from a previous tag page.
author     Ludovic Chabant <ludovic@chabant.com>
date       Tue, 05 Jun 2018 22:05:46 -0700
parents    1c324407bd1f
children   e94737572542
files      piecrust/sources/blogarchives.py piecrust/sources/taxonomy.py
diffstat   2 files changed, 11 insertions(+), 24 deletions(-)
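To make the failure mode concrete, here is a minimal sketch of the collision the commit message describes, assuming a render cache keyed by the content item's spec. The RenderCache class and its render method are hypothetical stand-ins for illustration, not the actual PieCrust cache API.

    # Hypothetical sketch: a cache keyed by content item spec. When every
    # taxonomy or archive page shares the spec '_index', the first page to
    # be baked populates the cache entry and every later page reuses it.
    class RenderCache:
        def __init__(self):
            self._entries = {}

        def render(self, spec, data):
            if spec not in self._entries:
                self._entries[spec] = 'rendered(%r)' % (data,)
            return self._entries[spec]

    cache = RenderCache()

    # Before the fix: both tag pages use the spec '_index'.
    print(cache.render('_index', {'term': 'foo'}))  # rendered({'term': 'foo'})
    print(cache.render('_index', {'term': 'bar'}))  # rendered({'term': 'foo'})  <- stale data

    # After the fix: each term gets a unique spec, so no entry is shared.
    print(cache.render('_index[foo]', {'term': 'foo'}))  # rendered({'term': 'foo'})
    print(cache.render('_index[bar]', {'term': 'bar'}))  # rendered({'term': 'bar'})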
--- a/piecrust/sources/blogarchives.py	Tue Jun 05 21:59:41 2018 -0700
+++ b/piecrust/sources/blogarchives.py	Tue Jun 05 22:05:46 2018 -0700
@@ -42,7 +42,7 @@
     def findContentFromRoute(self, route_params):
         year = route_params['year']
         return ContentItem(
-            '_index',
+            '_index[%s]' % year,
             {'route_params': {'year': year}})
 
     def prepareRenderContext(self, ctx):
@@ -165,11 +165,6 @@
         self._pagebaker.stopWriterQueue()
 
     def createJobs(self, ctx):
-        logger.debug("Caching template page for blog archives '%s'." %
-                     self.inner_source.name)
-        page = self.app.getPage(self.source, ContentItem('_index', {}))
-        page._load()
-
         logger.debug("Building blog archives for: %s" %
                      self.inner_source.name)
         self._buildDirtyYears(ctx)
@@ -181,13 +176,12 @@
         current_record = ctx.current_record
 
         for y in self._dirty_years:
-            record_entry_spec = '_index[%04d]' % y
+            item_spec = '_index[%04d]' % y
 
-            jobs.append(create_job(self, '_index',
-                                   year=y,
-                                   record_entry_spec=record_entry_spec))
+            jobs.append(create_job(self, item_spec,
+                                   year=y))
 
-            entry = rec_fac(record_entry_spec)
+            entry = rec_fac(item_spec)
             current_record.addEntry(entry)
 
         if len(jobs) > 0:
@@ -196,7 +190,7 @@
 
     def run(self, job, ctx, result):
         year = job['year']
-        content_item = ContentItem('_index',
+        content_item = ContentItem('_index[%04d]' % year,
                                    {'year': year,
                                     'route_params': {'year': year}})
         page = Page(self.source, content_item)
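The archive side of the change boils down to formatting the year into the item spec. A brief sketch of the spec format follows; the helper name is invented for this example and is not part of the PieCrust API.

    # Hypothetical helper mirroring the spec format in the diff above;
    # '%04d' zero-pads the year so every archive page gets a distinct,
    # stable spec.
    def archive_item_spec(year):
        return '_index[%04d]' % year

    assert archive_item_spec(2018) == '_index[2018]'
    assert archive_item_spec(987) == '_index[0987]'
    # Distinct years can no longer collide on a shared '_index' spec.
    assert archive_item_spec(2017) != archive_item_spec(2018)

Because the spec is now unique per year, the same string serves as both the job's item spec and the bake record entry spec, which is why the separate record_entry_spec argument to create_job disappears in this hunk.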
--- a/piecrust/sources/taxonomy.py	Tue Jun 05 21:59:41 2018 -0700
+++ b/piecrust/sources/taxonomy.py	Tue Jun 05 22:05:46 2018 -0700
@@ -84,7 +84,7 @@
     def findContentFromRoute(self, route_params):
         slugified_term = route_params[self.taxonomy.term_name]
-        spec = '_index'
+        spec = '_index[%s]' % slugified_term
         metadata = {'term': slugified_term,
                     'route_params': {
                         self.taxonomy.term_name: slugified_term}
@@ -274,11 +274,6 @@
         self._pagebaker.stopWriterQueue()
 
     def createJobs(self, ctx):
-        logger.debug("Caching template page for taxonomy '%s'." %
-                     self.taxonomy.name)
-        page = self.app.getPage(self.source, ContentItem('_index', {}))
-        page._load()
-
         logger.debug("Building '%s' taxonomy pages for source: %s" %
                      (self.taxonomy.name, self.inner_source.name))
         self._analyzer = _TaxonomyTermsAnalyzer(self, ctx.record_histories)
@@ -292,14 +287,12 @@
         current_record = ctx.current_record
 
         for slugified_term in self._analyzer.dirty_slugified_terms:
-            item_spec = '_index'
-            record_entry_spec = '_index[%s]' % slugified_term
+            item_spec = '_index[%s]' % slugified_term
 
             jobs.append(create_job(self, item_spec,
-                                   term=slugified_term,
-                                   record_entry_spec=record_entry_spec))
+                                   term=slugified_term))
 
-            entry = rec_fac(record_entry_spec)
+            entry = rec_fac(item_spec)
             current_record.addEntry(entry)
 
         if len(jobs) > 0:
@@ -308,7 +301,7 @@
 
     def run(self, job, ctx, result):
         term = job['term']
-        content_item = ContentItem('_index',
+        content_item = ContentItem('_index[%s]' % term,
                                    {'term': term,
                                     'route_params': {
                                         self.taxonomy.term_name: term}
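The taxonomy hunks follow the same pattern with the slugified term in place of the year. A short sketch, again with an invented helper name:

    # Hypothetical helper mirroring the taxonomy spec format: embedding the
    # slugified term makes each dirty term's job and record entry unique.
    def taxonomy_item_spec(slugified_term):
        return '_index[%s]' % slugified_term

    specs = [taxonomy_item_spec(t) for t in ('piecrust', 'static-sites')]
    assert specs == ['_index[piecrust]', '_index[static-sites]']
    assert len(set(specs)) == len(specs)  # no two terms share a spec

Both files also drop the "Caching template page" warm-up in createJobs, presumably because pre-loading a page under the shared '_index' spec no longer matches any of the per-term or per-year specs the jobs now use.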