piecrust2: comparison of piecrust/sources/blogarchives.py @ 1136:5f97b5b59dfe
bake: Optimize cache handling for the baking process.
- Get rid of the 2-level pipeline runs... handle a single set of passes.
- Go back to load/render segments/layout passes for pages.
- Add descriptions of what each job batch does (see the sketch below the changeset metadata).
- Improve the taxonomy pipeline so it doesn't re-bake terms that don't need
to be re-baked.
- Simplify some of the code.
author    Ludovic Chabant <ludovic@chabant.com>
date      Mon, 23 Apr 2018 21:47:49 -0700
parents   ba809c221a27
children  9f3e702a8a69
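
Reading aid: the createJobs change in this diff now returns the job batch together with a short description of what the batch does ("archive"), or (None, None) when no archive year needs re-baking. Below is a minimal, self-contained sketch of that return shape only; the ArchiveJobsSketch class, the create_job stand-in, and the job fields are made up for illustration and are not piecrust's actual implementation.

# Illustrative sketch only; class and helper names are invented for the example.
def create_job(source_name, item_spec, **kwargs):
    # Stand-in for piecrust.pipelines.base.create_job: just bundle the job data.
    return dict(kwargs, source=source_name, item_spec=item_spec)


class ArchiveJobsSketch:
    def __init__(self, dirty_years):
        self._dirty_years = dirty_years

    def createJobs(self):
        # One job per dirty year; unchanged years are handled later without baking.
        jobs = [create_job('blog_archives', '_index', year=y)
                for y in sorted(self._dirty_years)]
        if len(jobs) > 0:
            return jobs, "archive"   # the batch plus a description of what it does
        return None, None            # nothing dirty, so no batch this pass


jobs, desc = ArchiveJobsSketch({2017, 2018}).createJobs()
print(desc, [j['year'] for j in jobs])   # archive [2017, 2018]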
--- 1135:6350ee084273
+++ 1136:5f97b5b59dfe
@@ -5,16 +5,14 @@
 from piecrust.data.filters import PaginationFilter, IFilterClause
 from piecrust.dataproviders.pageiterator import (
     PageIterator, HardCodedFilterIterator, DateSortIterator)
 from piecrust.page import Page
 from piecrust.pipelines._pagebaker import PageBaker
-from piecrust.pipelines._pagerecords import (
-    PagePipelineRecordEntry,
-    add_page_job_result, merge_job_result_into_record_entry)
+from piecrust.pipelines._pagerecords import PagePipelineRecordEntry
 from piecrust.pipelines.base import (
     ContentPipeline,
-    create_job, get_record_name_for_source, content_item_from_job)
+    create_job, get_record_name_for_source)
 from piecrust.routing import RouteParameter
 from piecrust.sources.base import ContentItem
 from piecrust.sources.generator import GeneratorSourceBase
 from piecrust.sources.list import ListSource
 
@@ -191,12 +189,12 @@
 
         entry = rec_fac(record_entry_spec)
         current_record.addEntry(entry)
 
         if len(jobs) > 0:
-            return jobs
-        return None
+            return jobs, "archive"
+        return None, None
 
     def run(self, job, ctx, result):
         year = job['year']
         content_item = ContentItem('_index',
                                    {'year': year,
@@ -204,17 +202,16 @@
         page = Page(self.source, content_item)
 
         prev_entry = ctx.previous_entry
         rdr_subs = self._pagebaker.bake(page, prev_entry)
 
-        add_page_job_result(result)
         result['subs'] = rdr_subs
         result['year'] = page.source_metadata['year']
 
     def handleJobResult(self, result, ctx):
         existing = ctx.record_entry
-        merge_job_result_into_record_entry(existing, result)
+        existing.subs = result['subs']
         existing.year = result['year']
 
     def postJobRun(self, ctx):
         # Create bake entries for the years that were *not* dirty.
         # Otherwise, when checking for deleted pages, we would not find any
@@ -241,11 +238,12 @@
         current_records = ctx.record_histories.current
         cur_rec = current_records.getRecord(record_name)
         for cur_entry in cur_rec.getEntries():
             dt = datetime.datetime.fromtimestamp(cur_entry.timestamp)
             all_years.add(dt.year)
-            if cur_entry.was_any_sub_baked:
+            if cur_entry.hasFlag(
+                    PagePipelineRecordEntry.FLAG_SEGMENTS_RENDERED):
                 dirty_years.add(dt.year)
 
         self._all_years = all_years
         self._dirty_years = dirty_years
 
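
Reading aid for the postJobRun hunk above: dirty-year detection now keys off the PagePipelineRecordEntry.FLAG_SEGMENTS_RENDERED record flag instead of was_any_sub_baked, so an archive year is only re-baked when one of its posts actually had its segments re-rendered. The snippet below is a small self-contained sketch of that bucketing, using made-up stand-ins (FakeEntry, split_years, a local flag constant) rather than piecrust's real record classes.

import datetime

FLAG_SEGMENTS_RENDERED = 2 ** 0   # stand-in value; the real flag lives on PagePipelineRecordEntry


class FakeEntry:
    # Minimal stand-in for a bake record entry: a timestamp plus a flag bitfield.
    def __init__(self, timestamp, flags):
        self.timestamp = timestamp
        self.flags = flags

    def hasFlag(self, flag):
        return (self.flags & flag) != 0


def split_years(entries):
    # Bucket every entry's year; a year is dirty only if one of its entries
    # had its segments re-rendered during this bake.
    all_years, dirty_years = set(), set()
    for e in entries:
        year = datetime.datetime.fromtimestamp(e.timestamp).year
        all_years.add(year)
        if e.hasFlag(FLAG_SEGMENTS_RENDERED):
            dirty_years.add(year)
    return all_years, dirty_years


entries = [
    FakeEntry(datetime.datetime(2017, 5, 1).timestamp(), 0),
    FakeEntry(datetime.datetime(2018, 4, 23).timestamp(), FLAG_SEGMENTS_RENDERED),
]
all_years, dirty_years = split_years(entries)
print(sorted(all_years - dirty_years))   # [2017]: years whose archive pages can be carried over un-baked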