piecrust2: comparison of piecrust/processing/pipeline.py @ 492:d90ccdf18156
tests: Fix processing tests on Windows.
See the comment in `pipeline.py` for more info, but basically I was passing
already-initialized processors to the worker pool, which means pickling the
whole app. Pretty bad. Interesting that it only broke on Windows, though.
| author   | Ludovic Chabant <ludovic@chabant.com> |
| -------- | ------------------------------------- |
| date     | Thu, 23 Jul 2015 22:07:32 -0700       |
| parents  | aefe70229fdd                          |
| children | 7453baeb0839                          |
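The failure is easy to reproduce outside of PieCrust. Below is a minimal sketch with hypothetical `DummyApp` and `CopyProcessor` classes (not PieCrust's real ones): once a processor has been initialized against the app it holds a reference to it, so pickling the processor drags the entire app object graph along, and any unpicklable member (locks, open file handles, caches) aborts the transfer to a worker process. This is also the likely reason it only broke on Windows: `multiprocessing` there always uses the `spawn` start method, which must pickle everything handed to worker initializers, whereas the default `fork` start method on POSIX inherits the parent's memory without pickling anything.

```python
import pickle
import threading


class DummyApp:
    """Stand-in for the PieCrust app; holds something that can't be pickled."""
    def __init__(self):
        self.cache_lock = threading.Lock()   # locks, like open files, don't pickle


class CopyProcessor:
    """Stand-in for a pipeline processor."""
    def initialize(self, app):
        # After initialization the processor keeps a reference to the app...
        self.app = app


proc = CopyProcessor()
proc.initialize(DummyApp())

try:
    # ...so pickling the processor drags the whole app object graph with it.
    pickle.dumps(proc)
except TypeError as exc:
    print("can't ship this to a worker process:", exc)

# A bare, uninitialized instance (or a factory callable) pickles fine.
pickle.dumps(CopyProcessor())
```

An uninitialized instance, or a zero-argument factory callable, carries no app reference and crosses the process boundary without trouble, which is exactly the shape the change below moves to.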
--- piecrust/processing/pipeline.py (491:152a15046b41)
+++ piecrust/processing/pipeline.py (492:d90ccdf18156)
@@ -55,12 +55,19 @@
                 '.git*', '.hg*', '.svn']
         self.ignore_patterns = make_re(ignores)
         self.force_patterns = make_re(baker_params.get('force', []))
 
         # Those things are mostly for unit-testing.
+        #
+        # Note that additional processors can't be passed as instances.
+        # Instead, we need some factory functions because we need to create
+        # one instance right away to use during the initialization phase, and
+        # another instance to pass to the worker pool. The initialized one will
+        # be tied to the PieCrust app instance, which can't be pickled across
+        # processes.
         self.enabled_processors = None
-        self.additional_processors = None
+        self.additional_processors_factories = None
 
     def addIgnorePatterns(self, patterns):
         self.ignore_patterns += make_re(patterns)
 
     def run(self, src_dir_or_file=None, *,
@@ -72,14 +79,15 @@
         if self.enabled_processors is not None:
             logger.debug("Filtering processors to: %s" %
                          self.enabled_processors)
             processors = get_filtered_processors(processors,
                                                  self.enabled_processors)
-        if self.additional_processors is not None:
+        if self.additional_processors_factories is not None:
             logger.debug("Adding %s additional processors." %
-                         len(self.additional_processors))
-            for proc in self.additional_processors:
+                         len(self.additional_processors_factories))
+            for proc_fac in self.additional_processors_factories:
+                proc = proc_fac()
                 self.app.env.registerTimer(proc.__class__.__name__,
                                            raise_if_registered=False)
                 proc.initialize(self.app)
                 processors.append(proc)
 
@@ -246,11 +254,14 @@
 
         ctx = ProcessingWorkerContext(
                 self.app.root_dir, self.out_dir, self.tmp_dir,
                 self.force, self.app.debug)
         ctx.enabled_processors = self.enabled_processors
-        ctx.additional_processors = self.additional_processors
+        if self.additional_processors_factories is not None:
+            ctx.additional_processors = [
+                    proc_fac()
+                    for proc_fac in self.additional_processors_factories]
 
         pool = WorkerPool(
                 worker_class=ProcessingWorker,
                 initargs=(ctx,))
         return pool
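To make the factory pattern concrete, here is a self-contained sketch using the standard library's `multiprocessing.Pool` with hypothetical `FooProcessor`, `_worker_init`, and `_do_work` names; it is an analogy for, not a copy of, PieCrust's `WorkerPool` and `ProcessingWorkerContext`. The tests hand over zero-argument callables; the parent calls them once to build processors it can initialize against its own app, and the context passed to the pool only ever carries fresh, uninitialized instances, so it stays picklable and each worker initializes its own copies.

```python
import multiprocessing


class FooProcessor:
    """Hypothetical extra processor a test might register with the pipeline."""
    def initialize(self, app):
        self.app = app

    def process(self, path):
        return (self.__class__.__name__, path)


def _worker_init(ctx):
    # Runs once in every worker process. ctx was pickled on its way here, so
    # it can only carry uninitialized processors; each worker then initializes
    # them against its own app instance.
    global _processors
    worker_app = object()                      # stand-in for the worker's app
    _processors = list(ctx['additional_processors'])
    for proc in _processors:
        proc.initialize(worker_app)


def _do_work(path):
    return [proc.process(path) for proc in _processors]


if __name__ == '__main__':
    # The test suite passes factories (zero-argument callables), not instances.
    factories = [FooProcessor]

    # Parent side: one set of instances, initialized against the parent app.
    parent_app = object()
    parent_procs = [fac() for fac in factories]
    for proc in parent_procs:
        proc.initialize(parent_app)

    # Worker side: a second, uninitialized set goes into the picklable context.
    ctx = {'additional_processors': [fac() for fac in factories]}
    with multiprocessing.Pool(2, initializer=_worker_init,
                              initargs=(ctx,)) as pool:
        print(pool.map(_do_work, ['a.txt', 'b.txt']))
```

That mirrors the patch's design choice: nothing app-bound ever goes through `initargs`, so the same test setup works under both the `fork` and `spawn` start methods.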