piecrust2: piecrust/processing/requirejs.py @ 369:4b1019bb2533
serve: Giant refactor to change how we handle data when serving pages.
* We need a distinction between source metadata and route metadata. In most
cases they're the same, but in cases like taxonomy pages, route metadata
contains more things that can't be in source metadata if we want to re-use
cached pages.
* Create a new `QualifiedPage` type, which is a page paired with a specific route
  and that route's metadata (see the sketch below the changeset metadata). Pass
  this around in many places.
* Instead of passing a URL around, use the route in the `QualifiedPage` to
  generate URLs. This is better since it removes the guess-work from trying
  to generate URLs for sub-pages.
* Deep-copy app and page configurations before passing them around to things
that could modify them, like data builders and such.
* Exclude taxonomy pages from iterator data providers.
* Properly nest iterator data providers for when the theme and user page
sources are merged inside `site.pages`.
author:   Ludovic Chabant <ludovic@chabant.com>
date:     Sun, 03 May 2015 18:47:10 -0700
parents:  e5f048799d61
children: 4850f8c21b6e
File source (piecrust/processing/requirejs.py):
# RequireJS processor: runs the RequireJS optimizer (r.js) on the configured
# build file and suppresses individual processing of other Javascript files.
import os
import os.path
import json
import hashlib
import logging
import platform
import subprocess
from piecrust.processing.base import Processor, PRIORITY_FIRST
from piecrust.processing.tree import FORCE_BUILD


logger = logging.getLogger(__name__)


class RequireJSProcessor(Processor):
    PROCESSOR_NAME = 'requirejs'

    def __init__(self):
        super(RequireJSProcessor, self).__init__()
        self.is_bypassing_structured_processing = True
        self._conf = None

    def initialize(self, app):
        super(RequireJSProcessor, self).initialize(app)

        self._conf = app.config.get('requirejs')
        if self._conf is None:
            return

        if 'build_path' not in self._conf:
            raise Exception("You need to specify `requirejs/build_path` "
                            "for RequireJS.")
        self._conf.setdefault('bin', 'r.js')
        self._conf.setdefault('out_path', self._conf['build_path'])

    def onPipelineStart(self, pipeline):
        super(RequireJSProcessor, self).onPipelineStart(pipeline)

        if self._conf is None:
            return

        logger.debug("Adding Javascript suppressor to build pipeline.")
        skip = _JavascriptSkipProcessor(self._conf['build_path'])
        pipeline.processors.append(skip)

    def matches(self, path):
        if self._conf is None:
            return False
        return path == self._conf['build_path']

    def getDependencies(self, path):
        # Always rebuild: the modules bundled by the build file aren't tracked
        # as dependencies here.
        return FORCE_BUILD

    def process(self, path, out_dir):
        args = [self._conf['bin'], '-o', path]
        # On Windows the `r.js` command is usually a shell wrapper, so run it
        # through the shell.
        shell = (platform.system() == 'Windows')
        cwd = self.app.root_dir
        logger.debug("Running RequireJS: %s" % ' '.join(args))
        try:
            retcode = subprocess.call(args, shell=shell, cwd=cwd)
        except FileNotFoundError as ex:
            logger.error("Tried running RequireJS processor "
                         "with command: %s" % args)
            raise Exception("Error running RequireJS. "
                            "Did you install it?") from ex
        if retcode != 0:
            raise Exception("Error occurred in RequireJS compiler. "
                            "Please check log messages above for "
                            "more information.")
        return True


class _JavascriptSkipProcessor(Processor):
    # Matches every .js file except the RequireJS build file and returns False
    # from process(), so those files produce no output of their own.
    PROCESSOR_NAME = 'requirejs_javascript_skip'

    def __init__(self, except_path=None):
        super(_JavascriptSkipProcessor, self).__init__()
        self.priority = PRIORITY_FIRST
        self.is_bypassing_structured_processing = True
        self._except_path = except_path

    def matches(self, path):
        _, ext = os.path.splitext(path)
        return ext == '.js' and path != self._except_path

    def process(self, in_path, out_path):
        return False
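For reference on how the processor above is configured: `initialize()` reads a `requirejs` section from the site configuration, requires `build_path`, and fills in defaults for `bin` and `out_path`. The snippet below shows that section as the Python dict `app.config.get('requirejs')` would return after the defaults are applied; the path value is a hypothetical example.

# Hypothetical `requirejs` configuration section, shown as the dict that
# `app.config.get('requirejs')` would return once `initialize()` has applied
# its defaults. The path value is only an example.
requirejs_conf = {
    'build_path': 'assets/js/build.js',  # required; missing it raises an exception
    'bin': 'r.js',                       # optional; defaults to 'r.js'
    'out_path': 'assets/js/build.js',    # optional; defaults to `build_path`
}

When the pipeline reaches the build file, `process()` effectively runs `r.js -o <build_path>` from the site root, while the `_JavascriptSkipProcessor` registered in `onPipelineStart()` keeps every other `.js` file from being processed individually.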