changeset 215:a47580a0955b

bake: Better error handling for the processing pipeline. Pipeline jobs now keep track of whether they've seen any errors, and this is aggregated into an overall "success" flag on the processing record. Jobs also keep running as long as no critical (i.e. internal) failure occurs. Errors raised by processors are better tracked: the processor that failed, along with the input file, is recorded in the processing record (a minimal sketch of this error-chaining pattern follows the changeset metadata below). The `bake` command returns a failure exit code if processing saw any error.
author Ludovic Chabant <ludovic@chabant.com>
date Sat, 31 Jan 2015 17:08:02 -0800
parents 09e350db7f8f
children c5ada46b281a
files piecrust/commands/builtin/baking.py piecrust/processing/base.py piecrust/processing/records.py piecrust/processing/tree.py
diffstat 4 files changed, 46 insertions(+), 16 deletions(-)
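
The core of the change is the new `ProcessorError` raised with `raise ... from e`, whose `__cause__` chain is walked to record every error (processor name, input file, and the underlying failure). Below is a minimal, standalone sketch of that pattern; `ProcessorError` mirrors the class added in `piecrust/processing/tree.py`, while `record_errors` and the example processor name/path are purely illustrative, not PieCrust APIs.

    class ProcessingTreeError(Exception):
        pass

    class ProcessorError(ProcessingTreeError):
        def __init__(self, proc_name, in_path, *args):
            super(ProcessorError, self).__init__(*args)
            self.proc_name = proc_name
            self.in_path = in_path

        def __str__(self):
            return "Processor %s failed on: %s" % (self.proc_name, self.in_path)

    def record_errors(ex):
        # Walk the chain of `raise ... from ...` causes and collect one
        # message per link, mirroring what the worker stores in the record
        # entry's `errors` list.
        errors = []
        while ex:
            errors.append(str(ex))
            ex = ex.__cause__
        return errors

    try:
        try:
            raise ValueError("bad input")
        except Exception as e:
            # Hypothetical processor name and input path, for illustration.
            raise ProcessorError("sass", "theme.scss") from e
    except ProcessingTreeError as ex:
        print(record_errors(ex))
        # ['Processor sass failed on: theme.scss', 'bad input']
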
line diff
--- a/piecrust/commands/builtin/baking.py	Sat Jan 31 16:28:16 2015 -0800
+++ b/piecrust/commands/builtin/baking.py	Sat Jan 31 17:08:02 2015 -0800
@@ -40,19 +40,20 @@
         out_dir = (ctx.args.output or
                    os.path.join(ctx.app.root_dir, '_counter'))
 
+        success = True
         start_time = time.clock()
         try:
             # Bake the site sources.
-            self._bakeSources(ctx, out_dir)
+            success = success & self._bakeSources(ctx, out_dir)
 
             # Bake the assets.
             if not ctx.args.no_assets:
-                self._bakeAssets(ctx, out_dir)
+                success = success & self._bakeAssets(ctx, out_dir)
 
             # All done.
             logger.info('-------------------------')
             logger.info(format_timed(start_time, 'done baking'))
-            return 0
+            return 0 if success else 1
         except Exception as ex:
             if ctx.app.debug:
                 logger.exception(ex)
@@ -65,12 +66,14 @@
                 ctx.app, out_dir,
                 force=ctx.args.force)
         baker.bake()
+        return True
 
     def _bakeAssets(self, ctx, out_dir):
         proc = ProcessorPipeline(
                 ctx.app, out_dir,
                 force=ctx.args.force)
-        proc.run()
+        record = proc.run()
+        return record.success
 
 
 class ShowRecordCommand(ChefCommand):
--- a/piecrust/processing/base.py	Sat Jan 31 16:28:16 2015 -0800
+++ b/piecrust/processing/base.py	Sat Jan 31 17:08:02 2015 -0800
@@ -11,7 +11,8 @@
         ProcessorPipelineRecordEntry, TransitionalProcessorPipelineRecord,
         FLAG_PROCESSED, FLAG_OVERRIDEN, FLAG_BYPASSED_STRUCTURED_PROCESSING)
 from piecrust.processing.tree import (
-        ProcessingTreeBuilder, ProcessingTreeRunner, ProcessingTreeError,
+        ProcessingTreeBuilder, ProcessingTreeRunner,
+        ProcessingTreeError, ProcessorError,
         STATE_DIRTY,
         print_node, get_node_name_tree)
 
@@ -239,8 +240,10 @@
                 self.processDirectory(ctx, path)
 
         # Wait on all workers.
+        record.current.success = True
         for w in pool:
             w.join()
+            record.current.success &= w.success
         if abort.is_set():
             raise Exception("Worker pool was aborted.")
 
@@ -310,6 +313,7 @@
         super(ProcessingWorker, self).__init__()
         self.wid = wid
         self.ctx = ctx
+        self.success = True
 
     def run(self):
         while(not self.ctx.abort_event.is_set()):
@@ -320,11 +324,13 @@
                 break
 
             try:
-                self._unsafeRun(job)
+                success = self._unsafeRun(job)
                 logger.debug("[%d] Done with file." % self.wid)
                 self.ctx.work_queue.task_done()
+                self.success &= success
             except Exception as ex:
                 self.ctx.abort_event.set()
+                self.success = False
                 logger.error("[%d] Critical error, aborting." % self.wid)
                 logger.exception(ex)
                 break
@@ -347,7 +353,7 @@
             record_entry.flags |= FLAG_OVERRIDEN
             logger.info(format_timed(start_time,
                     '%s [not baked, overridden]' % rel_path))
-            return
+            return True
 
         processors = pipeline.getFilteredProcessors(
                 job.mount_info['processors'])
@@ -355,9 +361,12 @@
             builder = ProcessingTreeBuilder(processors)
             tree_root = builder.build(rel_path)
         except ProcessingTreeError as ex:
-            record_entry.errors.append(str(ex))
-            logger.error("Error processing %s: %s" % (rel_path, ex))
-            return
+            msg = str(ex)
+            logger.error("Error processing %s: %s" % (rel_path, msg))
+            while ex:
+                record_entry.errors.append(str(ex))
+                ex = ex.__cause__
+            return False
 
         print_node(tree_root, recursive=True)
         leaves = tree_root.getLeaves()
@@ -380,10 +389,18 @@
                     pipeline.out_dir, self.ctx.pipeline_lock)
             if runner.processSubTree(tree_root):
                 record_entry.flags |= FLAG_PROCESSED
-                logger.info(format_timed(start_time, "[%d] %s" % (self.wid, rel_path)))
+                logger.info(format_timed(
+                    start_time, "[%d] %s" % (self.wid, rel_path)))
+            return True
         except ProcessingTreeError as ex:
-            record_entry.errors.append(str(ex))
-            logger.error("Error processing %s: %s" % (rel_path, ex))
+            msg = str(ex)
+            if isinstance(ex, ProcessorError):
+                msg = str(ex.__cause__)
+            logger.error("Error processing %s: %s" % (rel_path, msg))
+            while ex:
+                record_entry.errors.append(str(ex))
+                ex = ex.__cause__
+            return False
 
 
 def make_mount_infos(mounts, root_dir):
--- a/piecrust/processing/records.py	Sat Jan 31 16:28:16 2015 -0800
+++ b/piecrust/processing/records.py	Sat Jan 31 17:08:02 2015 -0800
@@ -9,6 +9,7 @@
         super(ProcessorPipelineRecord, self).__init__()
         self.out_dir = None
         self.process_time = None
+        self.success = False
 
     def hasOverrideEntry(self, rel_path):
         return self.findEntry(rel_path) is not None
--- a/piecrust/processing/tree.py	Sat Jan 31 16:28:16 2015 -0800
+++ b/piecrust/processing/tree.py	Sat Jan 31 17:08:02 2015 -0800
@@ -24,6 +24,16 @@
     pass
 
 
+class ProcessorError(ProcessingTreeError):
+    def __init__(self, proc_name, in_path, *args):
+        super(ProcessorError, self).__init__(*args)
+        self.proc_name = proc_name
+        self.in_path = in_path
+
+    def __str__(self):
+        return "Processor %s failed on: %s" % (self.proc_name, self.in_path)
+
+
 class ProcessingTreeNode(object):
     def __init__(self, path, available_procs, level=0):
         self.path = path
@@ -154,8 +164,7 @@
                             colored=False))
                 return True
             except Exception as e:
-                raise ProcessingTreeError("Error processing: %s" %
-                        node.path) from e
+                raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
 
         # All outputs of a node must go to the same directory, so we can get
         # the output directory off of the first output.
@@ -183,7 +192,7 @@
                 print_node(node, "-> %s [clean]" % out_dir)
                 return False
         except Exception as e:
-            raise Exception("Error processing: %s" % node.path) from e
+            raise ProcessorError(proc.PROCESSOR_NAME, full_path) from e
 
     def _computeNodeState(self, node):
         if node.state != STATE_UNKNOWN:
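
For reference, a simplified, standalone sketch of the success aggregation the pipeline changes above implement: each worker keeps its own `success` flag, non-critical processing errors only flip that flag, a critical (internal) failure aborts the worker, and the flags are AND-ed together after joining. The `Worker` class and job lambdas here are illustrative, not PieCrust code.

    import threading

    class Worker(threading.Thread):
        def __init__(self, jobs):
            super(Worker, self).__init__()
            self.jobs = jobs
            self.success = True

        def run(self):
            for job in self.jobs:
                try:
                    # A job returns False on a processing error but the
                    # worker keeps going.
                    self.success &= job()
                except Exception:
                    # Critical (internal) failure: mark as failed and stop.
                    self.success = False
                    break

    jobs = [lambda: True, lambda: False]
    pool = [Worker(jobs[i::2]) for i in range(2)]
    for w in pool:
        w.start()

    overall = True
    for w in pool:
        w.join()
        overall &= w.success

    # Loosely mirrors the exit code `chef bake` now returns.
    print(0 if overall else 1)
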