piecrust2: comparison of piecrust/processing/tree.py @ 215:a47580a0955b
bake: Better error handling for the processing pipeline.
Pipeline jobs now keep track of whether they've seen any errors. This is
aggregated into an overall "success" flag for the processing record. Also, jobs
keep going as long as there's no critical (i.e. internal) failure happening.
Errors raised by processors are also better tracked: the actual processor that
failed, along with the input file, is tracked in the processing record.
The `bake` command returns a failure exit code if processing saw any error.
author    Ludovic Chabant <ludovic@chabant.com>
date      Sat, 31 Jan 2015 17:08:02 -0800
parents   308d5180bf81
children  c4b3a7fd2f87
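The per-job error tracking and the overall "success" flag described in the
commit message live in the processing record code, not in tree.py, so they do
not appear in the diff below. As a rough sketch of the aggregation idea (the
class and attribute names here are illustrative assumptions, not the actual
piecrust2 record API):

```python
# Sketch of the aggregation described in the commit message; the names
# JobRecordEntry and ProcessingRecord are assumptions for illustration only.
class JobRecordEntry:
    def __init__(self, path):
        self.path = path
        self.errors = []            # error messages collected for this job


class ProcessingRecord:
    def __init__(self):
        self.entries = []
        self.success = True         # overall flag for the whole run

    def addEntry(self, entry):
        self.entries.append(entry)
        # A single failed job flips the overall flag, but processing keeps
        # going; only critical (internal) failures abort the run.
        if entry.errors:
            self.success = False
```

Later jobs keep running after a failure, so one broken asset does not hide
errors in the rest of the site.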
comparison: 214:09e350db7f8f vs. 215:a47580a0955b
@@ -20,10 +20,20 @@
     pass
 
 
 class ProcessorNotFoundError(ProcessingTreeError):
     pass
+
+
+class ProcessorError(ProcessingTreeError):
+    def __init__(self, proc_name, in_path, *args):
+        super(ProcessorError, self).__init__(*args)
+        self.proc_name = proc_name
+        self.in_path = in_path
+
+    def __str__(self):
+        return "Processor %s failed on: %s" % (self.proc_name, self.in_path)
 
 
 class ProcessingTreeNode(object):
     def __init__(self, path, available_procs, level=0):
         self.path = path
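The ProcessorError added above carries both the processor name and the input
file, so a failure can be reported precisely. A small usage sketch, assuming
the class is imported from piecrust.processing.tree; the failing processor and
the paths are made up:

```python
from piecrust.processing.tree import ProcessorError

def process_asset(path):
    # Stand-in for a processor blowing up on an input file.
    try:
        raise RuntimeError("Sass compilation failed")
    except Exception as e:
        # Wrap the low-level error, like the pipeline code in the hunks below.
        raise ProcessorError("sass", path) from e

try:
    process_asset("assets/css/theme.scss")
except ProcessorError as err:
    print(err)   # Processor sass failed on: assets/css/theme.scss
```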
@@ -152,12 +162,11 @@
                 format_timed(
                     start_time, "(bypassing structured processing)",
                     colored=False))
             return True
         except Exception as e:
-            raise ProcessingTreeError("Error processing: %s" %
-                                      node.path) from e
+            raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
 
         # All outputs of a node must go to the same directory, so we can get
         # the output directory off of the first output.
         base_out_dir = self._getNodeBaseDir(node.outputs[0])
         rel_out_dir = os.path.dirname(node.path)
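Because the failure is re-raised with `from e`, the code that drives the
processing tree can log which processor failed on which file while the
original exception stays attached as `__cause__`. A hedged sketch of that
caller; everything except ProcessorError and its proc_name/in_path attributes
is an assumption rather than the actual pipeline code:

```python
from piecrust.processing.tree import ProcessorError

def process_one(runner, node, record_entry):
    # Hypothetical driver: record the failure instead of aborting the bake.
    # `runner.processSubTree` and the record_entry attributes are assumed
    # names; only ProcessorError, proc_name and in_path come from this file.
    try:
        runner.processSubTree(node)
        return True
    except ProcessorError as err:
        record_entry.errors.append(
                "%s (caused by: %s)" % (err, err.__cause__))
        record_entry.proc_name = err.proc_name
        record_entry.in_path = err.in_path
        return False
```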
@@ -181,11 +190,11 @@
                 return True
             else:
                 print_node(node, "-> %s [clean]" % out_dir)
                 return False
         except Exception as e:
-            raise Exception("Error processing: %s" % node.path) from e
+            raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
 
     def _computeNodeState(self, node):
         if node.state != STATE_UNKNOWN:
             return
 
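The last point in the commit message, the failure exit code, also lives
outside this file. As an illustration of the intent only (the pipeline entry
point and the record attribute are assumptions), the `bake` command can simply
map the aggregated success flag to its exit status:

```python
import sys

def run_bake(pipeline, out_dir):
    # Illustrative plumbing, not code from this changeset: run the asset
    # pipeline, then signal failure to the shell if any job saw an error.
    record = pipeline.run(out_dir)      # hypothetical entry point
    if not record.success:
        sys.exit(1)
    sys.exit(0)
```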