piecrust2: comparison of piecrust/processing/base.py @ 36:485682a6de50
New site layout support.
author | Ludovic Chabant <ludovic@chabant.com> |
date | Wed, 20 Aug 2014 23:16:51 -0700 |
parents | e4c345dcf33c |
children | 2f717f961996 |
35:e4c345dcf33c | 36:485682a6de50 |
---|---|
96 class ProcessorPipelineRecord(Record): | 96 class ProcessorPipelineRecord(Record): |
97 VERSION = 1 | 97 VERSION = 1 |
98 | 98 |
99 def __init__(self): | 99 def __init__(self): |
100 super(ProcessorPipelineRecord, self).__init__() | 100 super(ProcessorPipelineRecord, self).__init__() |
101 self.is_multi_mount = False | |
102 | 101 |
103 def addEntry(self, item): | 102 def addEntry(self, item): |
104 self.entries.append(item) | 103 self.entries.append(item) |
105 | 104 |
106 def hasOverrideEntry(self, rel_path): | 105 def hasOverrideEntry(self, rel_path): |
107 if not self.is_multi_mount: | |
108 return False | |
109 return self.findEntry(rel_path) is not None | 106 return self.findEntry(rel_path) is not None |
110 | 107 |
111 def findEntry(self, rel_path): | 108 def findEntry(self, rel_path): |
112 rel_path = rel_path.lower() | 109 rel_path = rel_path.lower() |
113 for entry in self.entries: | 110 for entry in self.entries: |
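This hunk drops the is_multi_mount flag, so hasOverrideEntry now always defers to findEntry, which performs a case-insensitive lookup over the recorded entries. Below is a minimal sketch of that lookup, assuming entries expose a rel_path attribute and that findEntry compares lowercased relative paths for equality; the classes are invented for illustration and are not the real piecrust record types.

```python
class FakeEntry:
    # Hypothetical entry type; the real record stores richer entries.
    def __init__(self, rel_path):
        self.rel_path = rel_path


class FakeRecord:
    def __init__(self):
        self.entries = []

    def hasOverrideEntry(self, rel_path):
        # No more is_multi_mount guard: every call goes through findEntry.
        return self.findEntry(rel_path) is not None

    def findEntry(self, rel_path):
        # Case-insensitive match on the recorded relative path.
        rel_path = rel_path.lower()
        for entry in self.entries:
            if entry.rel_path.lower() == rel_path:
                return entry
        return None


record = FakeRecord()
record.entries.append(FakeEntry('css/Main.less'))
print(record.hasOverrideEntry('CSS/main.less'))  # True
```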
131 self.job_queue = job_queue | 128 self.job_queue = job_queue |
132 self.record = record | 129 self.record = record |
133 | 130 |
134 | 131 |
135 class ProcessorPipeline(object): | 132 class ProcessorPipeline(object): |
136 def __init__(self, app, out_dir, force=False, mounts=None, | 133 def __init__(self, app, mounts, out_dir, force=False, |
137 skip_patterns=None, force_patterns=None, num_workers=4): | 134 skip_patterns=None, force_patterns=None, num_workers=4): |
138 self.app = app | 135 self.app = app |
| 136 self.mounts = mounts |
139 tmp_dir = app.cache_dir | 137 tmp_dir = app.cache_dir |
140 if not tmp_dir: | 138 if not tmp_dir: |
141 import tempfile | 139 import tempfile |
142 tmp_dir = os.path.join(tempfile.gettempdir(), 'piecrust') | 140 tmp_dir = os.path.join(tempfile.gettempdir(), 'piecrust') |
143 self.tmp_dir = os.path.join(tmp_dir, 'proc') | 141 self.tmp_dir = os.path.join(tmp_dir, 'proc') |
144 self.out_dir = out_dir | 142 self.out_dir = out_dir |
145 self.force = force | 143 self.force = force |
146 self.mounts = mounts or {} | |
147 self.skip_patterns = skip_patterns or [] | 144 self.skip_patterns = skip_patterns or [] |
148 self.force_patterns = force_patterns or [] | 145 self.force_patterns = force_patterns or [] |
149 self.processors = app.plugin_loader.getProcessors() | 146 self.processors = app.plugin_loader.getProcessors() |
150 self.num_workers = num_workers | 147 self.num_workers = num_workers |
151 | 148 |
152 if app.theme_dir is not None: | 149 self.skip_patterns += ['_cache', '_counter', |
153 self.mounts['theme'] = app.theme_dir | |
154 | |
155 self.skip_patterns += ['_cache', '_content', '_counter', | |
156 'theme_info.yml', | 150 'theme_info.yml', |
157 '.DS_Store', 'Thumbs.db', | 151 '.DS_Store', 'Thumbs.db', |
158 '.git*', '.hg*', '.svn'] | 152 '.git*', '.hg*', '.svn'] |
159 | 153 |
160 self.skip_patterns = make_re(self.skip_patterns) | 154 self.skip_patterns = make_re(self.skip_patterns) |
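The rewritten constructor takes the mount directories as a required argument instead of defaulting to {} and injecting the theme directory itself, and it still folds a set of default skip patterns into the caller's list before compiling them with make_re. make_re is not shown in this hunk; the sketch below is only one plausible way glob-style patterns such as '.git*' could be compiled, using fnmatch, and is not the actual piecrust implementation.

```python
import fnmatch
import re


def make_re_sketch(patterns):
    # Hypothetical stand-in for make_re: turn each glob-style pattern
    # into a compiled regular expression.
    return [re.compile(fnmatch.translate(p)) for p in patterns]


skip_patterns = ['*.tmp']  # caller-supplied patterns
skip_patterns += ['_cache', '_counter',
                  'theme_info.yml',
                  '.DS_Store', 'Thumbs.db',
                  '.git*', '.hg*', '.svn']
compiled = make_re_sketch(skip_patterns)
print(any(r.match('.hgignore') for r in compiled))  # True
```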
184 for proc in self.processors: | 178 for proc in self.processors: |
185 proc.onPipelineStart(self) | 179 proc.onPipelineStart(self) |
186 | 180 |
187 if src_dir_or_file is not None: | 181 if src_dir_or_file is not None: |
188 # Process only the given path. | 182 # Process only the given path. |
189 # Find out if this source directory is in a mount point. | 183 # Find out what mount point this is in. |
190 base_dir = self.app.root_dir | 184 for path in self.mounts: |
191 for name, path in self.mounts.items(): | |
192 if src_dir_or_file[:len(path)] == path: | 185 if src_dir_or_file[:len(path)] == path: |
193 base_dir = path | 186 base_dir = path |
| 187 break |
| 188 else: |
| 189 raise Exception("Input path '%s' is not part of any known " |
| 190 "mount point: %s" % |
| 191 (src_dir_or_file, self.mounts)) |
194 | 192 |
195 ctx = ProcessingContext(base_dir, queue, record) | 193 ctx = ProcessingContext(base_dir, queue, record) |
196 logger.debug("Initiating processing pipeline on: %s" % src_dir_or_file) | 194 logger.debug("Initiating processing pipeline on: %s" % src_dir_or_file) |
197 if os.path.isdir(src_dir_or_file): | 195 if os.path.isdir(src_dir_or_file): |
198 self.processDirectory(ctx, src_dir_or_file) | 196 self.processDirectory(ctx, src_dir_or_file) |
199 elif os.path.isfile(src_dir_or_file): | 197 elif os.path.isfile(src_dir_or_file): |
200 self.processFile(ctx, src_dir_or_file) | 198 self.processFile(ctx, src_dir_or_file) |
201 | 199 |
202 else: | 200 else: |
203 # Process everything. | 201 # Process everything. |
204 ctx = ProcessingContext(self.app.root_dir, queue, record) | 202 for path in self.mounts: |
205 logger.debug("Initiating processing pipeline on: %s" % self.app.root_dir) | 203 ctx = ProcessingContext(path, queue, record) |
206 self.processDirectory(ctx, self.app.root_dir) | |
207 ctx.is_multi_mount = True | |
208 for name, path in self.mounts.items(): | |
209 mount_ctx = ProcessingContext(path, queue, record) | |
210 logger.debug("Initiating processing pipeline on: %s" % path) | 204 logger.debug("Initiating processing pipeline on: %s" % path) |
211 self.processDirectory(mount_ctx, path) | 205 self.processDirectory(ctx, path) |
212 | 206 |
213 # Wait on all workers. | 207 # Wait on all workers. |
214 for w in pool: | 208 for w in pool: |
215 w.join() | 209 w.join() |
216 if abort.is_set(): | 210 if abort.is_set(): |
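When run() is given an explicit path, the new code resolves its base directory by prefix-matching the path against the known mounts, and the loop's else clause turns "no mount matched" into a hard error; when no path is given, it simply processes every mount in turn. A self-contained sketch of that prefix-match-or-fail pattern follows; the mounts list and the helper function are made up for illustration.

```python
def find_base_dir(src_dir_or_file, mounts):
    # for/else: the else branch runs only when the loop finished without
    # hitting break, i.e. no mount point is a prefix of the given path.
    for path in mounts:
        if src_dir_or_file[:len(path)] == path:
            base_dir = path
            break
    else:
        raise Exception("Input path '%s' is not part of any known "
                        "mount point: %s" % (src_dir_or_file, mounts))
    return base_dir


mounts = ['/site/assets', '/site/theme/assets']  # hypothetical mount paths
print(find_base_dir('/site/assets/css/main.less', mounts))  # /site/assets
```

Plain string slicing keeps the check cheap; a stricter variant might normalize both paths (for example with os.path.normpath) before comparing, but that goes beyond what this changeset does.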