Diffstat (limited to 'tools/perf/benchmarks')
| -rwxr-xr-x | tools/perf/benchmarks | 32 |
1 file changed, 26 insertions, 6 deletions
diff --git a/tools/perf/benchmarks b/tools/perf/benchmarks
index 38715ea8ea..267c315c63 100755
--- a/tools/perf/benchmarks
+++ b/tools/perf/benchmarks
@@ -381,17 +381,24 @@ class Runner():
         self._reports.append(report)

         # Preroll builds
-        for i in range(benchmark.preroll):
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark)
-            report.preroll_duration_ns.append(ns)
+        if not self._options.ApplyOnly():
+            for i in range(benchmark.preroll):
+                ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark, {})
+                report.preroll_duration_ns.append(ns)

         sys.stderr.write(f"PERFORMING CHANGE: {benchmark.change.label}\n")
         if not self._options.DryRun():
             benchmark.change.change()
+        if self._options.ApplyOnly():
+            sys.stderr.write(f"NOT UNDOING CHANGE: {benchmark.change.label}\n")
+            return
         try:
+            extra_env = {
+                "SOONG_HONOR_USE_PARTIAL_COMPILE": "true",
+            }
             # Measured build
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark)
+            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark, extra_env)
             report.duration_ns = ns

             dist_one = self._options.DistOne()
@@ -403,7 +410,7 @@ class Runner():
             # Postroll builds
             for i in range(benchmark.postroll):
                 ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"post_{i}"),
-                        benchmark)
+                        benchmark, {})
                 report.postroll_duration_ns.append(ns)

         finally:
@@ -422,7 +429,7 @@ class Runner():
             path += ("/%0" + str(len(str(self._options.Iterations()))) + "d") % iteration
         return path

-    def _run_build(self, lunch, build_log_dir, benchmark):
+    def _run_build(self, lunch, build_log_dir, benchmark, extra_env):
         """Builds the modules. Saves interesting log files to log_dir.
         Raises FatalError if the build fails.
         """
@@ -437,6 +444,8 @@ class Runner():
         env["TARGET_PRODUCT"] = lunch.target_product
         env["TARGET_RELEASE"] = lunch.target_release
         env["TARGET_BUILD_VARIANT"] = lunch.target_build_variant
+        for k, v in extra_env.items():
+            env[k] = v
         returncode = subprocess.call(cmd, env=env)
         if returncode != 0:
             report_error(f"Build failed: {' '.join(cmd)}")
@@ -564,6 +573,8 @@ benchmarks:
                             help="Benchmarks to run. Default suite will be run if omitted.")
         parser.add_argument("--list", action="store_true",
                             help="list the available benchmarks. No benchmark is run.")
+        parser.add_argument("--apply-only", action="store_true",
+                            help="apply the change only, and then exit. Intended only for debugging.")
         parser.add_argument("--dist-one", action="store_true",
                             help="Copy logs and metrics to the given dist dir. Requires that only"
                                 + " one benchmark be supplied. Postroll steps will be skipped.")
@@ -585,6 +596,12 @@ benchmarks:
         if self._args.dist_one and len(self.Benchmarks()) != 1:
             self._error("--dist-one requires exactly one --benchmark.")

+        # --apply-only forces --iterations=1
+        if self._args.apply_only:
+            self._args.iterations = 1
+            if self._args.dist_one:
+                self._error("--dist-one cannot be used with --apply-only.")
+
         if self._had_error:
             raise FatalError()
@@ -630,6 +647,9 @@ benchmarks:
     def Tag(self):
         return self._args.tag

+    def ApplyOnly(self):
+        return self._args.apply_only
+
     def DryRun(self):
         return self._args.dry_run
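
For readers skimming the patch, here is a minimal, self-contained sketch of the environment-overlay pattern it introduces: each call to _run_build now receives an extra_env dict that is layered on top of the inherited environment before the build subprocess is launched. The run_build name and the "true" command below are illustrative placeholders, not code from the script itself.

import os
import subprocess

def run_build(cmd, extra_env):
    """Run a command with per-invocation environment overrides layered on os.environ."""
    env = dict(os.environ)          # start from the inherited environment
    for k, v in extra_env.items():  # overlay per-call overrides, as the patch does
        env[k] = v
    return subprocess.call(cmd, env=env)

if __name__ == "__main__":
    # Placeholder command; the real script invokes the Android build here.
    run_build(["true"], {"SOONG_HONOR_USE_PARTIAL_COMPILE": "true"})

In the patch itself only the measured build passes a non-empty overlay (SOONG_HONOR_USE_PARTIAL_COMPILE=true); preroll and postroll builds pass an empty dict, so they keep the default environment.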
