Rewrite the build system benchmarks to be much simpler and not require bazel.
Test: ./benchmarks && ./format_benchmarks
Change-Id: I907421ed0c85e961d78342a3e58b2d4ab4aaf6ac
diff --git a/tools/perf/format_benchmarks b/tools/perf/format_benchmarks
new file mode 100755
index 0000000..4c1e38b
--- /dev/null
+++ b/tools/perf/format_benchmarks
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+if __name__ == "__main__":
+ sys.dont_write_bytecode = True
+
+import argparse
+import dataclasses
+import datetime
+import json
+import os
+import pathlib
+import statistics
+import zoneinfo
+
+import pretty
+import utils
+
+# TODO:
+# - Flag if the last postroll build was more than 15 seconds or something. That's
+# an indicator that something is amiss.
+# - Add a mode to print all of the values for multi-iteration runs
+# - Add a flag to reorder the tags
+# - Add a flag to reorder the headers in order to show grouping more clearly.
+
+
def FindSummaries(args):
    """Resolve the summary arguments into a sorted list of summary.json paths.

    Each argument may be a summary.json file (used directly) or a directory
    (searched recursively for summary.json files). With no arguments, the
    default report directory relative to the tree root is searched.
    Exits with an error message for an argument that is neither.
    """
    def scan_dir(directory):
        return [str(p.resolve())
                for p in pathlib.Path(directory).glob("**/summary.json")]

    if not args:
        # No arguments given: fall back to the default report directory.
        root = utils.get_root()
        if not root:
            return []
        return scan_dir(root.joinpath("..", utils.DEFAULT_REPORT_DIR))

    found = []
    for arg in args:
        if os.path.isfile(arg):
            # A file is taken as-is.
            found.append(arg)
        elif os.path.isdir(arg):
            # A directory contributes every summary.json beneath it.
            found.extend(scan_dir(arg))
        else:
            sys.stderr.write(f"Invalid summary argument: {arg}\n")
            sys.exit(1)
    return sorted(found)
+
+
def LoadSummary(filename):
    """Parse and return the JSON contents of a summary file."""
    with open(filename) as fp:
        return json.load(fp)
+
+# Columns:
+# Date
+# Branch
+# Tag
+# --
+# Lunch
+# Rows:
+# Benchmark
+
@dataclasses.dataclass(frozen=True)
class Key():
    """Empty frozen-dataclass placeholder for a hashable table key."""
+
class Column():
    """Placeholder for a table column; currently holds no state."""

    def __init__(self):
        # Nothing to initialize yet.
        pass
+
def lunch_str(d):
    "Convert a lunch dict to a string"
    product = d["TARGET_PRODUCT"]
    release = d["TARGET_RELEASE"]
    variant = d["TARGET_BUILD_VARIANT"]
    return f"{product}-{release}-{variant}"
+
def group_by(l, key):
    "Return a list of tuples, grouped by key, sorted by key"
    buckets = {}
    for element in l:
        k = key(element)
        if k not in buckets:
            buckets[k] = []
        buckets[k].append(element)
    return list(buckets.items())
+
+
class Table:
    """A sparse 2-D table of string cells, keyed by (column_key, row_key).

    Column keys are tuples of header strings (one entry per header row);
    row keys are benchmark titles. Insertion order of first appearance
    determines column and row order.
    """
    def __init__(self):
        self._data = {}   # (column_key, row_key) -> cell text
        self._rows = []   # row keys, in first-seen order
        self._cols = []   # column keys, in first-seen order
        # Parallel sets for O(1) membership tests; the lists keep order.
        self._row_set = set()
        self._col_set = set()

    def Set(self, column_key, row_key, data):
        """Store a cell, registering the column/row on first use."""
        self._data[(column_key, row_key)] = data
        if column_key not in self._col_set:
            self._col_set.add(column_key)
            self._cols.append(column_key)
        if row_key not in self._row_set:
            self._row_set.add(row_key)
            self._rows.append(row_key)

    def Write(self, out):
        """Format and write the table to `out` via pretty.FormatTable.

        Column keys must all be tuples of the same length: zip(*cols)
        transposes them into header rows. Header rows where every column
        has the same value are omitted to reduce clutter.
        """
        table = []
        # Expand the column items
        for row in zip(*self._cols):
            if row.count(row[0]) == len(row):
                continue  # identical across all columns; not informative
            table.append([""] + [col for col in row])
        if table:
            table.append(pretty.SEPARATOR)
        # Populate the data; missing cells render as empty strings.
        for row in self._rows:
            table.append([str(row)] + [str(self._data.get((col, row), "")) for col in self._cols])
        out.write(pretty.FormatTable(table))
+
+
def format_duration_sec(ns):
    "Format a duration in ns to second precision"
    total = round(ns / 1000000000)
    hours, remainder = divmod(total, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    if hours > 0:
        parts.append(f"{hours:2d}h ")
    if hours > 0 or minutes > 0:
        parts.append(f"{minutes:2d}m ")
    parts.append(f"{seconds:2d}s")
    return "".join(parts)
+
def main(argv):
    """Load benchmark summaries, group them, and print an analysis table.

    argv is accepted for symmetry with the __main__ guard but argparse
    reads sys.argv itself (unchanged from the original behavior).
    """
    parser = argparse.ArgumentParser(
        prog="format_benchmarks",
        allow_abbrev=False, # Don't let people write unsupportable scripts.
        description="Print analysis tables for benchmarks")

    parser.add_argument("summaries", nargs="*",
                        help="A summary.json file or a directory in which to look for summaries.")

    args = parser.parse_args()

    # Load the summaries
    summaries = [(s, LoadSummary(s)) for s in FindSummaries(args.summaries)]

    # Convert to MTV time
    for filename, s in summaries:
        dt = datetime.datetime.fromisoformat(s["start_time"])
        dt = dt.astimezone(zoneinfo.ZoneInfo("America/Los_Angeles"))
        s["datetime"] = dt
        s["date"] = datetime.date(dt.year, dt.month, dt.day)

    # Sort the summaries
    summaries.sort(key=lambda s: (s[1]["date"], s[1]["branch"], s[1]["tag"]))

    # group the benchmarks by column (lunch combo) and iteration (benchmark id)
    def bm_key(b):
        return (
            lunch_str(b["lunch"]),
        )
    for filename, summary in summaries:
        summary["columns"] = [(key, group_by(bms, lambda b: b["id"])) for key, bms
                              in group_by(summary["benchmarks"], bm_key)]

    # Build the table: one column per (date, branch, tag, lunch), one row per
    # benchmark title; each cell is the median duration across iterations.
    table = Table()
    for filename, summary in summaries:
        for key, column in summary["columns"]:
            for id, cell in column:
                duration_ns = statistics.median([b["duration_ns"] for b in cell])
                # BUGFIX: strftime takes %-style format codes; the previous
                # "YYYY-MM-DD" printed that literal text instead of the date.
                table.Set(tuple([summary["date"].strftime("%Y-%m-%d"),
                                 summary["branch"],
                                 summary["tag"]]
                                + list(key)),
                          cell[0]["title"], format_duration_sec(duration_ns))

    table.Write(sys.stdout)

if __name__ == "__main__":
    main(sys.argv)
+