# HG changeset patch
# User Ludovic Chabant
# Date 1511679693 28800
# Node ID 09dc0240f08a33476a097dedde7ca40f61a0e08e
# Parent  58ef814cc83e5f220eac137bb36df5c907e92666
bake: Simplify output.

diff -r 58ef814cc83e -r 09dc0240f08a piecrust/baking/baker.py
--- a/piecrust/baking/baker.py	Sat Nov 25 22:37:46 2017 -0800
+++ b/piecrust/baking/baker.py	Sat Nov 25 23:01:33 2017 -0800
@@ -249,6 +249,7 @@
         job_count = 0
         stats = self.app.env.stats
         realm_name = REALM_NAMES[realm].lower()
+        participating_source_names = []
 
         for ppinfo in pplist:
             src = ppinfo.source
@@ -262,6 +263,7 @@
                 new_job_count = len(jobs)
                 job_count += new_job_count
                 pool.queueJobs(jobs)
+                participating_source_names.append(src.name)
             else:
                 new_job_count = 0
 
@@ -279,8 +281,8 @@
         pool.wait()
 
         logger.info(format_timed(
-            start_time, "%d pipeline jobs completed (%s, step 0)." %
-            (job_count, realm_name)))
+            start_time, "%d jobs completed (%s)." %
+            (job_count, ', '.join(participating_source_names))))
 
         # Now let's see if any job created a follow-up job. Let's keep
         # processing those jobs as long as they create new ones.
@@ -294,6 +296,7 @@
 
             start_time = time.perf_counter()
             job_count = 0
+            participating_source_names = []
 
             for sn, jobs in next_step_jobs.items():
                 if jobs:
@@ -310,6 +313,7 @@
                     job_count += len(jobs)
                     pool.userdata.next_step_jobs[sn] = []
                     pool.queueJobs(jobs)
+                    participating_source_names.append(sn)
 
             stats.stepTimer('MasterTaskPut_2+',
                             time.perf_counter() - start_time)
@@ -320,8 +324,8 @@
 
             logger.info(format_timed(
                 start_time,
-                "%d pipeline jobs completed (%s, step %d)." %
-                (job_count, realm_name, pool.userdata.cur_step)))
+                "%d jobs completed (%s)." %
+                (job_count, ', '.join(participating_source_names))))
 
             pool.userdata.cur_step += 1