comparison piecrust/processing/base.py @ 201:0c9de41689bb

processing: Add ability to specify processors per mount. The user can now specify which processors to use for each mount (i.e. each assets directory). This is mostly useful for disabling everything but the `copy` processor on websites that use Grunt or a similar asset pipeline instead of the built-in one (see the sketch below the changeset metadata).
author Ludovic Chabant <ludovic@chabant.com>
date Wed, 14 Jan 2015 22:42:26 -0800
parents 154b8df04829
children 29165f2f315d
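
As a quick illustration of the new behavior, here is a minimal standalone sketch of what the per-mount info looks like in practice. It mirrors the make_mount_info() helper introduced in this change; the mount paths and the Grunt-managed directory are hypothetical examples, not part of the change.

def make_mount_info(mounts):
    # A plain list of asset directories becomes a dict with empty per-mount info.
    if isinstance(mounts, list):
        mounts = {m: {} for m in mounts}

    for name, info in mounts.items():
        if not isinstance(info, dict):
            raise Exception("Asset directory info for '%s' is not a "
                            "dictionary." % name)
        # By default a mount may use every processor.
        info.setdefault('processors', '*')

    return mounts


mounts = make_mount_info({
    '/site/assets': {},                            # default: all processors
    '/site/grunt_assets': {'processors': 'copy'},  # Grunt output: only copy files
})
assert mounts['/site/assets']['processors'] == '*'
assert mounts['/site/grunt_assets']['processors'] == 'copy'
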
comparison: 200:76e459d48c43 -> 201:0c9de41689bb

@@ -100,12 +100,13 @@
     def _doProcess(self, in_path, out_path):
         raise NotImplementedError()


 class ProcessingContext(object):
-    def __init__(self, base_dir, job_queue, record=None):
+    def __init__(self, base_dir, mount_info, job_queue, record=None):
         self.base_dir = base_dir
+        self.mount_info = mount_info
         self.job_queue = job_queue
         self.record = record


 class ProcessorPipeline(object):
@@ -124,10 +125,12 @@
         self.skip_patterns = skip_patterns or []
         self.force_patterns = force_patterns or []
         self.processors = app.plugin_loader.getProcessors()
         self.num_workers = num_workers

+        self.mounts = make_mount_info(self.mounts)
+
         self.skip_patterns += ['_cache', '_counter',
                                'theme_info.yml',
                                '.DS_Store', 'Thumbs.db',
                                '.git*', '.hg*', '.svn']

@@ -136,11 +139,16 @@

     def addSkipPatterns(self, patterns):
         self.skip_patterns += make_re(patterns)

     def filterProcessors(self, authorized_names):
-        self.processors = list(filter(
+        if not authorized_names or authorized_names == '*':
+            return self.processors
+
+        if isinstance(authorized_names, str):
+            authorized_names = authorized_names.split(',')
+        return list(filter(
                 lambda p: p.PROCESSOR_NAME in authorized_names,
                 self.processors))

     def run(self, src_dir_or_file=None, *,
             new_only=False, delete=True,
@@ -182,30 +190,31 @@
             pool.append(worker)

         if src_dir_or_file is not None:
             # Process only the given path.
             # Find out what mount point this is in.
-            for path in self.mounts:
+            for path, info in self.mounts.items():
                 if src_dir_or_file[:len(path)] == path:
                     base_dir = path
+                    mount_info = info
                     break
             else:
                 raise Exception("Input path '%s' is not part of any known "
                                 "mount point: %s" %
-                                (src_dir_or_file, self.mounts))
+                                (src_dir_or_file, self.mounts.keys()))

-            ctx = ProcessingContext(base_dir, queue, record)
+            ctx = ProcessingContext(base_dir, mount_info, queue, record)
             logger.debug("Initiating processing pipeline on: %s" % src_dir_or_file)
             if os.path.isdir(src_dir_or_file):
                 self.processDirectory(ctx, src_dir_or_file, new_only)
             elif os.path.isfile(src_dir_or_file):
                 self.processFile(ctx, src_dir_or_file, new_only)

         else:
             # Process everything.
-            for path in self.mounts:
-                ctx = ProcessingContext(path, queue, record)
+            for path, info in self.mounts.items():
+                ctx = ProcessingContext(path, info, queue, record)
                 logger.debug("Initiating processing pipeline on: %s" % path)
                 self.processDirectory(ctx, path, new_only)

         # Wait on all workers.
         for w in pool:
@@ -249,11 +258,11 @@
                 self.processFile(ctx, os.path.join(dirpath, filename),
                                  new_only)

     def processFile(self, ctx, path, new_only=False):
         logger.debug("Queuing: %s" % path)
-        job = ProcessingWorkerJob(ctx.base_dir, path, new_only)
+        job = ProcessingWorkerJob(ctx.base_dir, ctx.mount_info, path, new_only)
         ctx.job_queue.put_nowait(job)


 class ProcessingWorkerContext(object):
     def __init__(self, pipeline, record,
@@ -264,12 +273,13 @@
         self.abort_event = abort_event
         self.pipeline_lock = pipeline_lock


 class ProcessingWorkerJob(object):
-    def __init__(self, base_dir, path, new_only=False):
+    def __init__(self, base_dir, mount_info, path, new_only=False):
         self.base_dir = base_dir
+        self.mount_info = mount_info
         self.path = path
         self.new_only = new_only


 class ProcessingWorker(threading.Thread):
@@ -316,12 +326,13 @@
             record_entry.flags |= FLAG_OVERRIDEN
             logger.info(format_timed(start_time,
                     '%s [not baked, overridden]' % rel_path))
             return

+        processors = pipeline.filterProcessors(job.mount_info['processors'])
         try:
-            builder = ProcessingTreeBuilder(pipeline.processors)
+            builder = ProcessingTreeBuilder(processors)
             tree_root = builder.build(rel_path)
         except ProcessingTreeError as ex:
             record_entry.errors.append(str(ex))
             logger.error("Error processing %s: %s" % (rel_path, ex))
             return
@@ -349,10 +360,23 @@
             record_entry.flags |= FLAG_PROCESSED
             logger.info(format_timed(start_time, "[%d] %s" % (self.wid, rel_path)))
         except ProcessingTreeError as ex:
             record_entry.errors.append(str(ex))
             logger.error("Error processing %s: %s" % (rel_path, ex))
+
+
+def make_mount_info(mounts):
+    if isinstance(mounts, list):
+        mounts = {m: {} for m in mounts}
+
+    for name, info in mounts.items():
+        if not isinstance(info, dict):
+            raise Exception("Asset directory info for '%s' is not a "
+                            "dictionary." % name)
+        info.setdefault('processors', '*')
+
+    return mounts


 def make_re(patterns):
     re_patterns = []
     for pat in patterns:
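
For reference, a small standalone sketch of the filtering semantics introduced in filterProcessors() above: '*' (or an empty value) keeps every processor, while a comma-separated string is split into a list of authorized names. The SimpleProcessor class and the non-copy processor names below are stand-ins, not PieCrust code.

class SimpleProcessor:
    def __init__(self, name):
        # Stand-in for a real processor exposing PROCESSOR_NAME.
        self.PROCESSOR_NAME = name


def filter_processors(processors, authorized_names):
    # '*' or an empty value means "use all processors" for this mount.
    if not authorized_names or authorized_names == '*':
        return processors
    # A comma-separated string becomes a list of processor names.
    if isinstance(authorized_names, str):
        authorized_names = authorized_names.split(',')
    return [p for p in processors if p.PROCESSOR_NAME in authorized_names]


procs = [SimpleProcessor('copy'), SimpleProcessor('less'), SimpleProcessor('sass')]
print([p.PROCESSOR_NAME for p in filter_processors(procs, '*')])          # all three
print([p.PROCESSOR_NAME for p in filter_processors(procs, 'copy')])       # ['copy']
print([p.PROCESSOR_NAME for p in filter_processors(procs, 'copy,less')])  # ['copy', 'less']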