diff piecrust/processing/base.py @ 117:6827dcc9d3fb

Changes to the asset processing pipeline:
* Add semi-functional RequireJS processor.
* Processors now match on the relative path.
* Support for processors that add more processors of their own.
* A couple of related fixes.
author Ludovic Chabant <ludovic@chabant.com>
date Tue, 28 Oct 2014 08:20:38 -0700
parents 45828c4167ad
children 133845647083
line wrap: on
line diff
--- a/piecrust/processing/base.py	Mon Oct 27 08:18:12 2014 -0700
+++ b/piecrust/processing/base.py	Tue Oct 28 08:20:38 2014 -0700
@@ -36,7 +36,7 @@
     def onPipelineEnd(self, pipeline):
         pass
 
-    def matches(self, filename):
+    def matches(self, path):
         return False
 
     def getDependencies(self, path):
@@ -56,7 +56,7 @@
         super(CopyFileProcessor, self).__init__()
         self.priority = PRIORITY_LAST
 
-    def matches(self, filename):
+    def matches(self, path):
         return True
 
     def getOutputFilenames(self, filename):
@@ -74,9 +74,9 @@
         super(SimpleFileProcessor, self).__init__()
         self.extensions = extensions or {}
 
-    def matches(self, filename):
+    def matches(self, path):
         for ext in self.extensions:
-            if filename.endswith('.' + ext):
+            if path.endswith('.' + ext):
                 return True
         return False
 
@@ -169,13 +169,20 @@
             self.processors))
 
     def run(self, src_dir_or_file=None):
-        record = ProcessorPipelineRecord()
+        # Invoke pre-processors.
+        for proc in self.processors:
+            proc.onPipelineStart(self)
+
+        # Sort our processors again in case the pre-process step involved
+        # patching the processors with some new ones.
+        self.processors.sort(key=lambda p: p.priority)
 
         # Create the workers.
         pool = []
         queue = Queue()
         abort = threading.Event()
         pipeline_lock = threading.Lock()
+        record = ProcessorPipelineRecord()
         for i in range(self.num_workers):
             ctx = ProcessingWorkerContext(self, record, queue, abort,
                     pipeline_lock)
@@ -183,10 +190,6 @@
             worker.start()
             pool.append(worker)
 
-        # Invoke pre-processors.
-        for proc in self.processors:
-            proc.onPipelineStart(self)
-
         if src_dir_or_file is not None:
             # Process only the given path.
             # Find out what mount point this is in.