piecrust2: changeset 1011:c4cf3cfe2726
bake: Better performance stats, and add callback to preload templates.
author | Ludovic Chabant <ludovic@chabant.com> |
---|---|
date | Sun, 26 Nov 2017 22:23:03 -0800 |
parents | 501bd4ab7e06 |
children | 576f7ebcd9c0 |
files | piecrust/app.py piecrust/baking/baker.py piecrust/templating/base.py piecrust/workerpool.py |
diffstat | 4 files changed, 43 insertions(+), 12 deletions(-) |
--- a/piecrust/app.py Sun Nov 26 22:21:33 2017 -0800
+++ b/piecrust/app.py Sun Nov 26 22:23:03 2017 -0800
@@ -50,7 +50,6 @@
         stats.registerTimer("PageRenderSegments")
         stats.registerTimer("PageRenderLayout")
         stats.registerTimer("PageSerialize")
-        stats.registerTimer("MergedMapping_get")
         stats.registerCounter('PageLoads')
         stats.registerCounter('PageRenderSegments')
         stats.registerCounter('PageRenderLayout')
--- a/piecrust/baking/baker.py Sun Nov 26 22:21:33 2017 -0800
+++ b/piecrust/baking/baker.py Sun Nov 26 22:23:03 2017 -0800
@@ -54,8 +54,7 @@
 
         stats = self.app.env.stats
         stats.registerTimer('LoadSourceContents', raise_if_registered=False)
-        stats.registerTimer('MasterTaskPut_1', raise_if_registered=False)
-        stats.registerTimer('MasterTaskPut_2+', raise_if_registered=False)
+        stats.registerTimer('CacheTemplates', raise_if_registered=False)
 
         # Make sure the output directory exists.
         if not os.path.isdir(self.out_dir):
@@ -98,8 +97,9 @@
         # Done with all the setup, let's start the actual work.
         logger.info(format_timed(start_time, "setup baker"))
 
-        # Load all sources.
+        # Load all sources, pre-cache templates.
         self._loadSources(ppmngr)
+        self._populateTemplateCaches()
 
         # Bake the realms.
         self._bakeRealms(pool, ppmngr, record_histories)
@@ -216,10 +216,19 @@
                 rec.addEntry(e)
 
         stats = self.app.env.stats
-        stats.stepTimer('LoadSourceContents',
-                        time.perf_counter() - start_time)
+        stats.stepTimer('LoadSourceContents', time.perf_counter() - start_time)
         logger.info(format_timed(start_time, "loaded site content"))
 
+    def _populateTemplateCaches(self):
+        start_time = time.perf_counter()
+
+        for eng in self.app.plugin_loader.getTemplateEngines():
+            eng.populateCache()
+
+        stats = self.app.env.stats
+        stats.stepTimer('CacheTemplates', time.perf_counter() - start_time)
+        logger.info(format_timed(start_time, "cache templates"))
+
     def _bakeRealms(self, pool, ppmngr, record_histories):
         # Bake the realms -- user first, theme second, so that a user item
         # can override a theme item.
@@ -272,8 +281,6 @@
                 "(%s, step 0)." %
                 (new_job_count, src.name, pp.PIPELINE_NAME, realm_name))
 
-        stats.stepTimer('MasterTaskPut_1', time.perf_counter() - start_time)
-
         if job_count == 0:
             logger.debug("No jobs queued! Bailing out of this bake pass.")
             return
@@ -315,8 +322,6 @@
                     pool.queueJobs(jobs)
                     participating_source_names.append(sn)
 
-            stats.stepTimer('MasterTaskPut_2+', time.perf_counter() - start_time)
-
             if job_count == 0:
                 break
 
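The baker-side change swaps the two `MasterTaskPut` timers for a single `CacheTemplates` timer, and times the new pre-cache step with the register-then-step pattern visible throughout this changeset: register a timer once, then step it with an elapsed delta measured via `time.perf_counter()`. The following is a minimal toy sketch of that pattern; `SimpleStats` is a stand-in written for this example and is not piecrust's `ExecutionStats`, whose real API is only partially visible in this diff.

```python
import time


class SimpleStats:
    """Toy stand-in for piecrust's ExecutionStats, only to show the pattern."""

    def __init__(self):
        self.timers = {}

    def registerTimer(self, name, *, raise_if_registered=True, time=0):
        if name in self.timers and raise_if_registered:
            raise Exception("Timer '%s' is already registered." % name)
        self.timers[name] = self.timers.get(name, 0) + time

    def stepTimer(self, name, delta):
        self.timers[name] += delta

    def stepTimerSince(self, name, start):
        self.stepTimer(name, time.perf_counter() - start)


stats = SimpleStats()
stats.registerTimer('CacheTemplates', raise_if_registered=False)

start_time = time.perf_counter()
sum(i * i for i in range(100_000))  # placeholder for the phase being timed
stats.stepTimer('CacheTemplates', time.perf_counter() - start_time)
print(stats.timers)
```

Passing `raise_if_registered=False` mirrors the baker code above, presumably so that the same stats object can survive more than one bake without tripping a duplicate-registration error.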
--- a/piecrust/templating/base.py Sun Nov 26 22:21:33 2017 -0800
+++ b/piecrust/templating/base.py Sun Nov 26 22:23:03 2017 -0800
@@ -27,6 +27,9 @@
     def initialize(self, app):
         self.app = app
 
+    def populateCache(self):
+        pass
+
     def renderSegment(self, path, segment, data):
         raise NotImplementedError()
 
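The new `populateCache()` hook is a no-op on the base class; as the baker.py diff shows, the baker calls it once per engine (via `plugin_loader.getTemplateEngines()`) after loading sources and before baking the realms. A hypothetical engine could override it to warm its cache up front. The sketch below assumes the base class in `piecrust/templating/base.py` is named `TemplateEngine` and that an engine can reach template directories through `self.app`; the class name, attributes, and directory walk are illustrative, not piecrust's actual Jinja engine.

```python
import os

from piecrust.templating.base import TemplateEngine


class PreloadingTemplateEngine(TemplateEngine):
    # Hypothetical engine written for illustration only.
    ENGINE_NAMES = ['preload-example']
    EXTENSIONS = ['tpl']

    def populateCache(self):
        # Read every template file once, up front, so that later renders hit
        # an in-memory cache instead of parsing templates lazily mid-bake.
        self._cache = {}
        for tpl_dir in getattr(self.app, 'templates_dirs', []):
            for name in os.listdir(tpl_dir):
                path = os.path.join(tpl_dir, name)
                if os.path.isfile(path):
                    with open(path, 'r', encoding='utf-8') as fp:
                        self._cache[name] = fp.read()
```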
--- a/piecrust/workerpool.py Sun Nov 26 22:21:33 2017 -0800
+++ b/piecrust/workerpool.py Sun Nov 26 22:23:03 2017 -0800
@@ -111,11 +111,12 @@
 
 
 def _real_worker_func_unsafe(params):
+    init_start_time = time.perf_counter()
+
     wid = params.wid
 
     stats = ExecutionStats()
     stats.registerTimer('WorkerInit')
-    init_start_time = time.perf_counter()
 
     # In a context where `multiprocessing` is using the `spawn` forking model,
     # the new process doesn't inherit anything, so we lost all our logging
@@ -248,6 +249,14 @@
                  callback=None, error_callback=None,
                  worker_count=None, batch_size=None,
                  userdata=None):
+        init_start_time = time.perf_counter()
+
+        stats = ExecutionStats()
+        stats.registerTimer('MasterInit')
+        self._stats = stats
+        self._time_in_put = 0
+        self._time_in_get = 0
+
         self.userdata = userdata
 
         worker_count = worker_count or os.cpu_count() or 1
@@ -299,6 +308,8 @@
         self._result_handler.daemon = True
         self._result_handler.start()
 
+        stats.stepTimerSince('MasterInit', init_start_time)
+
     def queueJobs(self, jobs):
         if self._closed:
             if self._error_on_join:
@@ -308,6 +319,8 @@
         jobs = list(jobs)
         new_job_count = len(jobs)
         if new_job_count > 0:
+            put_start_time = time.perf_counter()
+
             with self._lock_jobs_left:
                 self._jobs_left += new_job_count
 
@@ -323,6 +336,8 @@
                 job_batch = jobs[cur_offset:next_batch_idx]
                 self._quick_put((TASK_JOB_BATCH, job_batch))
                 cur_offset = next_batch_idx
+
+            self._time_in_put += (time.perf_counter() - put_start_time)
         else:
             with self._lock_jobs_left:
                 done = (self._jobs_left == 0)
@@ -346,6 +361,7 @@
         if not self._event.is_set():
             raise Exception("A previous job queue hasn't been cleared.")
 
+        close_start_time = time.perf_counter()
         logger.debug("Closing worker pool...")
         live_workers = list(filter(lambda w: w is not None, self._pool))
         handler = _ReportHandler(len(live_workers))
@@ -368,7 +384,13 @@
         self._result_handler.join()
         self._closed = True
 
-        return handler.reports
+        stats = self._stats
+        stats.registerTimer('MasterTaskPut', time=self._time_in_put)
+        stats.registerTimer('MasterResultGet', time=self._time_in_get)
+        stats.registerTimer('MasterClose',
+                            time=(time.perf_counter() - close_start_time))
+
+        return [stats] + handler.reports
 
     def _onResultHandlerCriticalError(self, wid):
         logger.error("Result handler received a critical error from "
@@ -397,7 +419,9 @@
     userdata = pool.userdata
     while True:
         try:
+            get_start_time = time.perf_counter()
             res = pool._quick_get()
+            pool._time_in_get = (time.perf_counter() - get_start_time)
         except (EOFError, OSError):
             logger.debug("Result handler thread encountered connection "
                          "problem, exiting.")
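With this change, the code path that used to `return handler.reports` (presumably the pool's close/shutdown method) now returns `[stats] + handler.reports`, so the master's own timers (`MasterInit`, `MasterTaskPut`, `MasterResultGet`, `MasterClose`) travel alongside the per-worker `ExecutionStats` reports. Below is a hedged sketch of how a caller might aggregate that combined list; the `timers` dict attribute on each report and the sample numbers are assumptions made for illustration only.

```python
from types import SimpleNamespace

# Illustrative stand-ins for the report objects; real reports would be
# piecrust ExecutionStats instances coming back from the worker pool.
master_stats = SimpleNamespace(timers={'MasterInit': 0.02, 'MasterTaskPut': 0.40})
worker_reports = [SimpleNamespace(timers={'WorkerInit': 0.10}),
                  SimpleNamespace(timers={'WorkerInit': 0.12})]

# Shape of the new return value: master stats first, then worker reports.
reports = [master_stats] + worker_reports

# Merge all timers into one summary, as a bake report consumer might do.
totals = {}
for report in reports:
    for name, value in report.timers.items():
        totals[name] = totals.get(name, 0.0) + value
print(totals)
```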