piecrust2: comparison of piecrust/processing/base.py @ 205:e725af1d48fb
bake: Change how asset directories are configured.

Rename `skip_patterns` and `force_patterns` to `ignore` and `force`.
Put less responsibility on the `bake` command to specify all those settings,
and more on the `Baker` and `ProcessorPipeline` themselves.
Add some tests.
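In practice this moves the processing settings out of the call site and into `ProcessorPipeline.__init__`, which now reads them from the app's `baker` configuration section. The before/after sketch below is illustrative only (the `mounts`, `out_dir`, `force` and pattern variables stand in for whatever the `bake` command previously computed); it is not the actual command source:

    # Old: the caller collected mounts, patterns and worker count itself.
    pipeline = ProcessorPipeline(
            app, mounts, out_dir, force=force,
            skip_patterns=skip_patterns, force_patterns=force_patterns,
            num_workers=4)

    # New: the pipeline looks up assets_dirs, workers, ignore and force
    # from app.config's 'baker' section on its own.
    pipeline = ProcessorPipeline(app, out_dir, force=force)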
author    Ludovic Chabant <ludovic@chabant.com>
date      Sun, 18 Jan 2015 12:12:57 -0800
parents   29165f2f315d
children  c5330cb35794
comparing 204:f98451237371 (old) with 205:e725af1d48fb (new)
old 111-159 / new 111-165:

         self.job_queue = job_queue
         self.record = record


 class ProcessorPipeline(object):
-    def __init__(self, app, mounts, out_dir, force=False,
-                 skip_patterns=None, force_patterns=None, num_workers=4):
+    def __init__(self, app, out_dir, force=False):
         assert app and out_dir
         self.app = app
-        self.mounts = mounts
+        self.out_dir = out_dir
+        self.force = force
+
         tmp_dir = app.cache_dir
         if not tmp_dir:
             import tempfile
             tmp_dir = os.path.join(tempfile.gettempdir(), 'piecrust')
         self.tmp_dir = os.path.join(tmp_dir, 'proc')
-        self.out_dir = out_dir
-        self.force = force
-        self.skip_patterns = skip_patterns or []
-        self.force_patterns = force_patterns or []
-        self.processors = app.plugin_loader.getProcessors()
-        self.num_workers = num_workers
-
-        self.mounts = make_mount_info(self.mounts)
-
-        self.skip_patterns += ['_cache', '_counter',
-                               'theme_info.yml',
-                               '.DS_Store', 'Thumbs.db',
-                               '.git*', '.hg*', '.svn']
-
-        self.skip_patterns = make_re(self.skip_patterns)
-        self.force_patterns = make_re(self.force_patterns)
+
+        baker_params = app.config.get('baker') or {}
+
+        assets_dirs = baker_params.get('assets_dirs', app.assets_dirs)
+        self.mounts = make_mount_info(assets_dirs)
+
+        self.num_workers = baker_params.get('workers', 4)
+
+        ignores = baker_params.get('ignore', [])
+        ignores += [
+                '_cache', '_counter',
+                'theme_info.yml',
+                '.DS_Store', 'Thumbs.db',
+                '.git*', '.hg*', '.svn']
+        self.skip_patterns = make_re(ignores)
+        self.force_patterns = make_re(baker_params.get('force', []))
+
+        self.processors = app.plugin_loader.getProcessors()

     def addSkipPatterns(self, patterns):
         self.skip_patterns += make_re(patterns)

     def filterProcessors(self, authorized_names):
-        if not authorized_names or authorized_names == '*':
+        self.processors = self.getFilteredProcessors(authorized_names)
+
+    def getFilteredProcessors(self, authorized_names):
+        if not authorized_names or authorized_names == 'all':
             return self.processors

         if isinstance(authorized_names, str):
             authorized_names = split_processor_names_re.split(authorized_names)

         procs = []
-        has_star = '*' in authorized_names
+        has_star = 'all' in authorized_names
         for p in self.processors:
             for name in authorized_names:
                 if name == p.PROCESSOR_NAME:
                     procs.append(p)
                     break

old 161-174 / new 167-176:

                     break
             else:
                 if has_star:
                     procs.append(p)
         return procs
-
-        return list(filter(
-                lambda p: p.PROCESSOR_NAME in authorized_names,
-                self.processors))

     def run(self, src_dir_or_file=None, *,
             new_only=False, delete=True,
             previous_record=None, save_record=True):
         # Invoke pre-processors.
old 344-354 / new 346-357:

         record_entry.flags |= FLAG_OVERRIDEN
         logger.info(format_timed(start_time,
                 '%s [not baked, overridden]' % rel_path))
         return

-    processors = pipeline.filterProcessors(job.mount_info['processors'])
+    processors = pipeline.getFilteredProcessors(
+            job.mount_info['processors'])
     try:
         builder = ProcessingTreeBuilder(processors)
         tree_root = builder.build(rel_path)
     except ProcessingTreeError as ex:
         record_entry.errors.append(str(ex))
old 388-398 / new 391-401:

     for name, info in mounts.items():
         if not isinstance(info, dict):
             raise Exception("Asset directory info for '%s' is not a "
                             "dictionary." % name)
-        info.setdefault('processors', '*')
+        info.setdefault('processors', 'all -uglifyjs -cleancss')

     return mounts


 def make_re(patterns):
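For reference, the keys the new constructor reads (`assets_dirs`, `workers`, `ignore`, `force`) all live under the `baker` section of the site configuration. A hypothetical value for `app.config.get('baker')` under the new naming might look like this; the mount name, directory entries and patterns are invented for illustration:

    baker_params = {
        'assets_dirs': {
            # mount name -> info dict; 'processors' defaults to
            # 'all -uglifyjs -cleancss' when left out (see make_mount_info).
            'assets': {'processors': 'all -cleancss'},
        },
        'workers': 4,
        'ignore': ['*.psd', 'drafts'],   # replaces the old skip_patterns
        'force': ['*.less'],             # replaces the old force_patterns
    }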