piecrust2: changeset 421:4a43d7015b75
bake: Improve performance timer reports.
Add per-worker timers, and separate bake and pipeline workers.
| author | Ludovic Chabant <ludovic@chabant.com> |
|---|---|
| date | Sat, 20 Jun 2015 23:27:39 -0700 |
| parents | f1b759c188b0 |
| children | a8a12f97addf |
| files | piecrust/baking/baker.py piecrust/baking/worker.py piecrust/commands/builtin/baking.py piecrust/processing/pipeline.py piecrust/processing/worker.py |
| diffstat | 5 files changed, 51 insertions(+), 19 deletions(-) |
```diff
--- a/piecrust/baking/baker.py Sat Jun 20 23:27:04 2015 -0700
+++ b/piecrust/baking/baker.py Sat Jun 20 23:27:39 2015 -0700
@@ -107,16 +107,19 @@
         # Get the timing information from the workers.
         record.current.timers = {}
-        for _ in range(len(pool.workers)):
+        for i in range(len(pool.workers)):
             try:
                 timers = pool.results.get(True, 0.1)
             except queue.Empty:
                 logger.error(
                         "Didn't get timing information from all workers.")
                 break
 
+            worker_name = 'BakeWorker_%d' % i
+            record.current.timers[worker_name] = {}
             for name, val in timers['data'].items():
                 main_val = record.current.timers.setdefault(name, 0)
                 record.current.timers[name] = main_val + val
+                record.current.timers[worker_name][name] = val
 
         # Delete files from the output.
         self._handleDeletetions(record)
@@ -549,7 +552,7 @@
                     pool.queue, pool.results, pool.abort_event,
                     force=self.force, debug=self.app.debug)
             w = multiprocessing.Process(
-                    name='Worker_%d' % i,
+                    name='BakeWorker_%d' % i,
                     target=worker_func, args=(i, ctx))
             w.start()
             pool.workers.append(w)
```
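The loop above now records each worker's timers twice: summed into the flat cross-worker totals, and verbatim under a per-worker `BakeWorker_<i>` key. A condensed standalone sketch of the resulting structure; the worker results and timing values are invented for illustration:

```python
# Condensed sketch of the per-worker aggregation above; the sample
# worker results and timings are invented for illustration.
worker_results = [
    {'type': 'timers', 'data': {'JobReceive': 1.2, 'LoadJobHandler': 3.4}},
    {'type': 'timers', 'data': {'JobReceive': 0.8, 'LoadJobHandler': 2.6}},
]

merged = {}
for i, timers in enumerate(worker_results):
    worker_name = 'BakeWorker_%d' % i
    merged[worker_name] = {}
    for name, val in timers['data'].items():
        # Flat, summed entry per timer name...
        merged[name] = merged.setdefault(name, 0) + val
        # ...plus a verbatim copy under the worker's own key.
        merged[worker_name][name] = val

# merged now mixes floats (totals) with dicts (per-worker breakdowns):
#   merged['JobReceive']                  -> 2.0
#   merged['BakeWorker_0']['JobReceive']  -> 1.2
```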
```diff
--- a/piecrust/baking/worker.py Sat Jun 20 23:27:04 2015 -0700
+++ b/piecrust/baking/worker.py Sat Jun 20 23:27:39 2015 -0700
@@ -52,7 +52,8 @@
         # Create the app local to this worker.
         app = PieCrust(self.ctx.root_dir, debug=self.ctx.debug)
         app.env.fs_cache_only_for_main_page = True
-        app.env.registerTimer("Worker_%d" % self.wid)
+        app.env.registerTimer("BakeWorker_%d_Total" % self.wid)
+        app.env.registerTimer("BakeWorkerInit")
         app.env.registerTimer("JobReceive")
 
         # Create the job handlers.
@@ -63,6 +64,8 @@
         for jt, jh in job_handlers.items():
             app.env.registerTimer(type(jh).__name__)
 
+        app.env.stepTimerSince("BakeWorkerInit", work_start_time)
+
         # Start working!
         aborted_with_exception = None
         while not self.ctx.abort_event.is_set():
@@ -91,8 +94,8 @@
                 self.ctx.results.put_nowait({'type': 'error', 'messages': msgs})
 
         # Send our timers to the main process before exiting.
-        app.env.stepTimer("Worker_%d" % self.wid,
-                          time.perf_counter() - work_start_time)
+        app.env.stepTimerSince("BakeWorker_%d_Total" % self.wid,
+                               work_start_time)
         self.ctx.results.put_nowait({
                 'type': 'timers', 'data': app.env._timers})
 
@@ -187,6 +190,9 @@
 
 
 class RenderFirstSubJobHandler(JobHandler):
+    def __init__(self, app, ctx):
+        super(RenderFirstSubJobHandler, self).__init__(app, ctx)
+
     def handleJob(self, job):
         # Render the segments for the first sub-page of this page.
         fac = job.payload.factory_info.build(self.app)
```
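`stepTimerSince` itself is not part of this changeset; judging from the call sites it replaces (`stepTimer(name, time.perf_counter() - start)`), it is presumably a small convenience wrapper on the environment object. A hypothetical sketch, not the actual piecrust `Environment` code:

```python
import time

# Hypothetical sketch of the `stepTimerSince` helper assumed by the
# call sites above; the real method is defined outside this diff.
class Environment:
    def __init__(self):
        self._timers = {}

    def registerTimer(self, name):
        # Timers accumulate elapsed seconds across steps.
        self._timers.setdefault(name, 0)

    def stepTimer(self, name, value):
        self._timers[name] += value

    def stepTimerSince(self, name, start):
        # Equivalent to the pattern it replaces:
        #   stepTimer(name, time.perf_counter() - start)
        self.stepTimer(name, time.perf_counter() - start)
```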
```diff
--- a/piecrust/commands/builtin/baking.py Sat Jun 20 23:27:04 2015 -0700
+++ b/piecrust/commands/builtin/baking.py Sat Jun 20 23:27:39 2015 -0700
@@ -4,6 +4,7 @@
 import hashlib
 import fnmatch
 import datetime
+from colorama import Fore
 from piecrust.baking.baker import Baker
 from piecrust.baking.records import (
         BakeRecord, BakeRecordEntry, SubPageBakeInfo)
@@ -71,14 +72,9 @@
 
         # Show merged timers.
         if ctx.args.show_timers:
-            from colorama import Fore
             logger.info("-------------------")
             logger.info("Timing information:")
-            for name in sorted(ctx.timers.keys()):
-                val_str = '%8.1f s' % ctx.timers[name]
-                logger.info(
-                        "[%s%s%s] %s" %
-                        (Fore.GREEN, val_str, Fore.RESET, name))
+            _show_timers(ctx.timers)
 
         # All done.
         logger.info('-------------------------')
@@ -115,9 +111,30 @@
         return
 
     for name, val in source.items():
-        if name not in target:
-            target[name] = 0
-        target[name] += val
+        if isinstance(val, float):
+            if name not in target:
+                target[name] = 0
+            target[name] += val
+        elif isinstance(val, dict):
+            if name not in target:
+                target[name] = {}
+            _merge_timers(val, target[name])
+
+
+def _show_timers(timers, indent=''):
+    sub_timer_names = []
+    for name in sorted(timers.keys()):
+        if isinstance(timers[name], float):
+            val_str = '%8.1f s' % timers[name]
+            logger.info(
+                    "%s[%s%s%s] %s" %
+                    (indent, Fore.GREEN, val_str, Fore.RESET, name))
+        else:
+            sub_timer_names.append(name)
+
+    for name in sub_timer_names:
+        logger.info('%s:' % name)
+        _show_timers(timers[name], indent + '  ')
 
 
 class ShowRecordCommand(ChefCommand):
```
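`_merge_timers` and `_show_timers` now recurse into the per-worker sub-dicts, so the report prints the flat totals first and then an indented block per worker. A standalone re-creation of the pair, with `print()` standing in for `logger.info` and the colorama color codes dropped, run on invented sample data:

```python
# Standalone re-creation of the recursive merge/display pair above;
# print() stands in for logger.info and colors are omitted.
def merge_timers(source, target):
    for name, val in source.items():
        if isinstance(val, float):
            # Flat float entries are summed across bake runs/workers.
            target[name] = target.setdefault(name, 0) + val
        elif isinstance(val, dict):
            # Per-worker sub-dicts are merged recursively.
            merge_timers(val, target.setdefault(name, {}))

def show_timers(timers, indent=''):
    sub_timer_names = []
    for name in sorted(timers.keys()):
        if isinstance(timers[name], float):
            print('%s[%8.1f s] %s' % (indent, timers[name], name))
        else:
            sub_timer_names.append(name)
    # Print nested per-worker blocks after the flat totals.
    for name in sub_timer_names:
        print('%s:' % name)
        show_timers(timers[name], indent + '  ')

totals = {}
merge_timers({'JobReceive': 2.0,
              'BakeWorker_0': {'JobReceive': 1.2}}, totals)
show_timers(totals)
# [     2.0 s] JobReceive
# BakeWorker_0:
#   [     1.2 s] JobReceive
```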
```diff
--- a/piecrust/processing/pipeline.py Sat Jun 20 23:27:04 2015 -0700
+++ b/piecrust/processing/pipeline.py Sat Jun 20 23:27:39 2015 -0700
@@ -134,16 +134,19 @@
         # Get timing information from the workers.
         record.current.timers = {}
-        for _ in range(len(pool.workers)):
+        for i in range(len(pool.workers)):
             try:
                 timers = pool.results.get(True, 0.1)
             except queue.Empty:
                 logger.error(
                         "Didn't get timing information from all workers.")
                 break
 
+            worker_name = 'PipelineWorker_%d' % i
+            record.current.timers[worker_name] = {}
             for name, val in timers['data'].items():
                 main_val = record.current.timers.setdefault(name, 0)
                 record.current.timers[name] = main_val + val
+                record.current.timers[worker_name][name] = val
 
         # Invoke post-processors.
         pipeline_ctx.record = record.current
@@ -256,7 +259,7 @@
             ctx.enabled_processors = self.enabled_processors
             ctx.additional_processors = self.additional_processors
             w = multiprocessing.Process(
-                    name='Worker_%d' % i,
+                    name='PipelineWorker_%d' % i,
                     target=worker_func, args=(i, ctx))
             w.start()
             pool.workers.append(w)
```
```diff
--- a/piecrust/processing/worker.py Sat Jun 20 23:27:04 2015 -0700
+++ b/piecrust/processing/worker.py Sat Jun 20 23:27:39 2015 -0700
@@ -73,7 +73,8 @@
         # Create the app local to this worker.
         app = PieCrust(self.ctx.root_dir, debug=self.ctx.debug)
         app.env.fs_cache_only_for_main_page = True
-        app.env.registerTimer("Worker_%d" % self.wid)
+        app.env.registerTimer("PipelineWorker_%d_Total" % self.wid)
+        app.env.registerTimer("PipelineWorkerInit")
         app.env.registerTimer("JobReceive")
         app.env.registerTimer('BuildProcessingTree')
         app.env.registerTimer('RunProcessingTree')
@@ -102,6 +103,8 @@
         # patching the processors with some new ones.
         processors.sort(key=lambda p: p.priority)
 
+        app.env.stepTimerSince("PipelineWorkerInit", work_start_time)
+
         aborted_with_exception = None
         while not self.ctx.abort_event.is_set():
             try:
@@ -131,8 +134,8 @@
         for proc in processors:
             proc.onPipelineEnd(pipeline_ctx)
 
-        app.env.stepTimer("Worker_%d" % self.wid,
-                          time.perf_counter() - work_start_time)
+        app.env.stepTimerSince("PipelineWorker_%d_Total" % self.wid,
+                               work_start_time)
         self.ctx.results.put_nowait({
                 'type': 'timers', 'data': app.env._timers})
 
```