comparison piecrust/processing/base.py @ 133:9e4c2e68a129
Optimize server for files that already exist.
* Only try to find new assets if no previously existing asset or page could be used.
* Tidy up the API a bit for passing and returning bake/process records.
* Put the process record in its place.
| author | Ludovic Chabant <ludovic@chabant.com> |
|---|---|
| date | Tue, 18 Nov 2014 21:32:04 -0800 |
| parents | 3080b6d02f40 |
| children | 308d5180bf81 |
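The central API change in the diff below is that `run()` now takes its record-handling options as keyword-only parameters, introduced by a bare `*` in the signature. Here is a minimal, self-contained sketch of that Python pattern; the function body and call sites are illustrative, only the parameter list mirrors the diff:

```python
def run(src_dir_or_file=None, *,
        new_only=False, delete=True,
        previous_record=None, save_record=True):
    # Everything after the bare `*` must be passed by keyword, so call
    # sites stay readable and can't swap flags by accident.
    return (new_only, delete, previous_record, save_record)

run('assets', new_only=True)   # OK: flag passed by keyword
# run('assets', True)          # TypeError: too many positional arguments
```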
| 132:3834e2ef0cf2 | 133:9e4c2e68a129 |
|---|---|
| 135 def filterProcessors(self, authorized_names): | 135 def filterProcessors(self, authorized_names): |
| 136 self.processors = list(filter( | 136 self.processors = list(filter( |
| 137 lambda p: p.PROCESSOR_NAME in authorized_names, | 137 lambda p: p.PROCESSOR_NAME in authorized_names, |
| 138 self.processors)) | 138 self.processors)) |
| 139 | 139 |
| 140 def run(self, src_dir_or_file=None, new_only=False): | 140 def run(self, src_dir_or_file=None, *, |
| | 141 new_only=False, delete=True, |
| | 142 previous_record=None, save_record=True): |
| 141 # Invoke pre-processors. | 143 # Invoke pre-processors. |
| 142 for proc in self.processors: | 144 for proc in self.processors: |
| 143 proc.onPipelineStart(self) | 145 proc.onPipelineStart(self) |
| 144 | 146 |
| 145 # Sort our processors again in case the pre-process step involved | 147 # Sort our processors again in case the pre-process step involved |
| 146 # patching the processors with some new ones. | 148 # patching the processors with some new ones. |
| 147 self.processors.sort(key=lambda p: p.priority) | 149 self.processors.sort(key=lambda p: p.priority) |
| 148 | 150 |
| 149 # Create the pipeline record. | 151 # Create the pipeline record. |
| 150 record = TransitionalProcessorPipelineRecord() | 152 record = TransitionalProcessorPipelineRecord() |
| 151 record_cache = self.app.cache.getCache('baker') | 153 record_cache = self.app.cache.getCache('proc') |
| 152 record_name = ( | 154 record_name = ( |
| 153 'assets_' + | |
| 154 hashlib.md5(self.out_dir.encode('utf8')).hexdigest() + | 155 hashlib.md5(self.out_dir.encode('utf8')).hexdigest() + |
| 155 '.record') | 156 '.record') |
| 156 if not self.force and record_cache.has(record_name): | 157 if previous_record: |
| | 158 record.setPrevious(previous_record) |
| | 159 elif not self.force and record_cache.has(record_name): |
| 157 t = time.clock() | 160 t = time.clock() |
| 158 record.loadPrevious(record_cache.getCachePath(record_name)) | 161 record.loadPrevious(record_cache.getCachePath(record_name)) |
| 159 logger.debug(format_timed(t, 'loaded previous bake record', | 162 logger.debug(format_timed(t, 'loaded previous bake record', |
| 160 colored=False)) | 163 colored=False)) |
| | 164 logger.debug("Got %d entries in process record." % |
| | 165 len(record.previous.entries)) |
| 161 | 166 |
| 162 # Create the workers. | 167 # Create the workers. |
| 163 pool = [] | 168 pool = [] |
| 164 queue = Queue() | 169 queue = Queue() |
| 165 abort = threading.Event() | 170 abort = threading.Event() |
| … | … |
| 202 w.join() | 207 w.join() |
| 203 if abort.is_set(): | 208 if abort.is_set(): |
| 204 raise Exception("Worker pool was aborted.") | 209 raise Exception("Worker pool was aborted.") |
| 205 | 210 |
| 206 # Handle deletions. | 211 # Handle deletions. |
| 207 if not new_only: | 212 if delete and not new_only: |
| 208 for path, reason in record.getDeletions(): | 213 for path, reason in record.getDeletions(): |
| 209 logger.debug("Removing '%s': %s" % (path, reason)) | 214 logger.debug("Removing '%s': %s" % (path, reason)) |
| 210 os.remove(path) | 215 os.remove(path) |
| 211 logger.info('[delete] %s' % path) | 216 logger.info('[delete] %s' % path) |
| 212 | 217 |
| 213 # Invoke post-processors. | 218 # Invoke post-processors. |
| 214 for proc in self.processors: | 219 for proc in self.processors: |
| 215 proc.onPipelineEnd(self) | 220 proc.onPipelineEnd(self) |
| 216 | 221 |
| 217 # Save the process record. | 222 # Finalize the process record. |
| 218 t = time.clock() | |
| 219 record.current.process_time = time.time() | 223 record.current.process_time = time.time() |
| 220 record.current.out_dir = self.out_dir | 224 record.current.out_dir = self.out_dir |
| 221 record.collapseRecords() | 225 record.collapseRecords() |
| 222 record.saveCurrent(record_cache.getCachePath(record_name)) | 226 |
| 223 logger.debug(format_timed(t, 'saved bake record', colored=False)) | 227 # Save the process record. |
| 224 | 228 if save_record: |
| 225 return record | 229 t = time.clock() |
| | 230 record.saveCurrent(record_cache.getCachePath(record_name)) |
| | 231 logger.debug(format_timed(t, 'saved bake record', colored=False)) |
| | 232 |
| | 233 return record.detach() |
| 226 | 234 |
| 227 def processDirectory(self, ctx, start_dir, new_only=False): | 235 def processDirectory(self, ctx, start_dir, new_only=False): |
| 228 for dirpath, dirnames, filenames in os.walk(start_dir): | 236 for dirpath, dirnames, filenames in os.walk(start_dir): |
| 229 rel_dirpath = os.path.relpath(dirpath, start_dir) | 237 rel_dirpath = os.path.relpath(dirpath, start_dir) |
| 230 dirnames[:] = [d for d in dirnames | 238 dirnames[:] = [d for d in dirnames |
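Together with the new `return record.detach()`, the keyword-only flags let a long-lived caller, such as the preview server, keep the process record in memory across runs instead of round-tripping it through the `proc` cache. A hedged sketch of that driving pattern follows; `FakePipeline` is a stand-in written for this example, and only the `run()` contract comes from the changeset:

```python
class FakePipeline:
    """Stand-in for ProcessorPipeline; mimics only the new run() contract."""

    def run(self, src_dir_or_file=None, *,
            new_only=False, delete=True,
            previous_record=None, save_record=True):
        # A real pipeline would process assets here; we just hand back a
        # record-like dict so the reuse pattern below is visible.
        return dict(previous_record or {}, out_dir='_cache')


pipeline = FakePipeline()

# Initial pass: process everything, keep the returned record in memory.
record = pipeline.run(save_record=False)

# Per-request passes: feed the previous record back in so only assets
# with no previously existing output need to be discovered and processed.
record = pipeline.run('assets/style.css', new_only=True, delete=False,
                      previous_record=record, save_record=False)
```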
