piecrust2: comparison of piecrust/sources/taxonomy.py @ 1144:9f3e702a8a69
bake: Give unique source specs to each taxonomy or blog archive page.
This prevents caching issues that, in some situations, led to one tag page reusing the data from a previous tag page.
| author | Ludovic Chabant <ludovic@chabant.com> |
|---|---|
| date | Tue, 05 Jun 2018 22:05:46 -0700 |
| parents | 5f97b5b59dfe |
| children | |
```diff
--- piecrust/sources/taxonomy.py    1143:1c324407bd1f
+++ piecrust/sources/taxonomy.py    1144:9f3e702a8a69
@@ -82,11 +82,11 @@
         return [RouteParameter(name, param_type,
                                variadic=self.taxonomy.is_multiple)]
 
     def findContentFromRoute(self, route_params):
         slugified_term = route_params[self.taxonomy.term_name]
-        spec = '_index'
+        spec = '_index[%s]' % slugified_term
         metadata = {'term': slugified_term,
                     'route_params': {
                         self.taxonomy.term_name: slugified_term}
                     }
         return ContentItem(spec, metadata)
```
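The only change in `findContentFromRoute()` is the spec: every taxonomy term used to resolve to the same `'_index'` spec, so anything that caches pages by their content item spec could hand one term's page to another. A minimal sketch of that failure mode, using a hypothetical spec-keyed cache (`_page_cache`, `get_page` are my own stand-ins, not PieCrust's actual caching code):

```python
# Hypothetical spec-keyed page cache, standing in for whatever caching
# happens behind app.getPage(); not PieCrust's actual implementation.
_page_cache = {}

def get_page(spec, metadata):
    # First lookup wins: later calls with the same spec get the
    # previously cached page, metadata and all.
    if spec not in _page_cache:
        _page_cache[spec] = {'spec': spec, 'metadata': metadata}
    return _page_cache[spec]

# Before this change: every term shares the '_index' spec, so the
# 'recipes' tag page silently reuses the 'baking' tag's data.
a = get_page('_index', {'term': 'baking'})
b = get_page('_index', {'term': 'recipes'})
assert b['metadata']['term'] == 'baking'  # wrong page served

# After: '_index[baking]' and '_index[recipes]' are distinct keys,
# so each term gets its own cached page.
_page_cache.clear()
a = get_page('_index[baking]', {'term': 'baking'})
b = get_page('_index[recipes]', {'term': 'recipes'})
assert b['metadata']['term'] == 'recipes'
```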
```diff
@@ -272,15 +272,10 @@
 
     def shutdown(self):
         self._pagebaker.stopWriterQueue()
 
     def createJobs(self, ctx):
-        logger.debug("Caching template page for taxonomy '%s'." %
-                     self.taxonomy.name)
-        page = self.app.getPage(self.source, ContentItem('_index', {}))
-        page._load()
-
         logger.debug("Building '%s' taxonomy pages for source: %s" %
                      (self.taxonomy.name, self.inner_source.name))
         self._analyzer = _TaxonomyTermsAnalyzer(self, ctx.record_histories)
         self._analyzer.analyze()
 
```
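The dropped block pre-loaded a single template page under the shared `'_index'` spec so later work could reuse it. With per-term specs, that warmed entry can never match a lookup again, which is presumably why the pre-load goes away. A tiny illustration, reusing the same hypothetical cache idea as above:

```python
# Warming a cache under the old shared key is useless once lookups
# carry the term inside the spec (hypothetical cache, as above).
warm_cache = {'_index': '<pre-loaded template page>'}

lookup_spec = '_index[%s]' % 'baking'
assert lookup_spec not in warm_cache  # the warmed entry is never hit
```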
```diff
@@ -290,19 +285,17 @@
         jobs = []
         rec_fac = self.createRecordEntry
         current_record = ctx.current_record
 
         for slugified_term in self._analyzer.dirty_slugified_terms:
-            item_spec = '_index'
-            record_entry_spec = '_index[%s]' % slugified_term
+            item_spec = '_index[%s]' % slugified_term
 
             jobs.append(create_job(self, item_spec,
-                                   term=slugified_term,
-                                   record_entry_spec=record_entry_spec))
+                                   term=slugified_term))
 
-            entry = rec_fac(record_entry_spec)
+            entry = rec_fac(item_spec)
             current_record.addEntry(entry)
 
         if len(jobs) > 0:
             return jobs, "taxonomize"
         return None, None
 
```
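`createJobs()` previously tracked two identifiers per term: a shared `item_spec` (`'_index'`) for the job itself and a unique `record_entry_spec` for the bake record. Now that the spec is unique per term, one string serves both purposes. A hedged guess at the job payload, assuming `create_job` simply packs the source name, the spec, and any keyword arguments into a dict; the real helper may well differ:

```python
def create_job(pipeline, item_spec, **kwargs):
    # Assumed shape only; PieCrust's actual create_job may differ.
    job = {'job_spec': (pipeline.source.name, item_spec)}
    job.update(kwargs)
    return job

class FakeSource:
    name = 'tags'

class FakePipeline:
    source = FakeSource()

for term in ('baking', 'recipes'):
    spec = '_index[%s]' % term
    job = create_job(FakePipeline(), spec, term=term)
    # The spec doubles as the record entry key, so the separate
    # record_entry_spec argument is no longer needed.
    print(job)
# {'job_spec': ('tags', '_index[baking]'), 'term': 'baking'}
# {'job_spec': ('tags', '_index[recipes]'), 'term': 'recipes'}
```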
```diff
@@ -309,8 +302,8 @@
     def run(self, job, ctx, result):
         term = job['term']
-        content_item = ContentItem('_index',
+        content_item = ContentItem('_index[%s]' % term,
                                    {'term': term,
                                     'route_params': {
                                         self.taxonomy.term_name: term}
                                    })
         page = Page(self.source, content_item)
```
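On the worker side, `run()` now rebuilds the content item from the job's term with the same `'_index[%s]'` pattern that `findContentFromRoute()` uses, so bake-time and serve-time lookups agree on one spec per term. A small self-contained check of that invariant, with my own minimal stand-in for `ContentItem` (the diff shows it being constructed from a spec and a metadata dict):

```python
class ContentItem:
    # Minimal stand-in with the two fields the diff shows being passed.
    def __init__(self, spec, metadata):
        self.spec = spec
        self.metadata = metadata

def item_for_term(term, term_name='tag'):
    # Mirrors what both findContentFromRoute() and run() build now.
    return ContentItem('_index[%s]' % term,
                       {'term': term,
                        'route_params': {term_name: term}})

served = item_for_term('baking')
baked = item_for_term('baking')
assert served.spec == baked.spec == '_index[baking]'
assert item_for_term('recipes').spec != served.spec  # unique per term
```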