# ################################################################
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
# ################################################################

import argparse
import glob
import json
import os
import pickle as pk
import subprocess
import time
import urllib.request


GITHUB_API_PR_URL = "https://api.github.com/repos/facebook/zstd/pulls?state=open"
GITHUB_URL_TEMPLATE = "https://github.com/{}/zstd"
RELEASE_BUILD = {"user": "facebook", "branch": "dev", "hash": None}

# poll the GitHub API for new PRs at most once a minute
DEFAULT_MAX_API_CALL_FREQUENCY_SEC = 60
PREVIOUS_PRS_FILENAME = "prev_prs.pk"

# The alarm threshold is somewhat arbitrary: a 1% regression is on the
# sensitive side, but the desktop this runs on is stable enough that it
# works well in practice.
CSPEED_REGRESSION_TOLERANCE = 0.01
DSPEED_REGRESSION_TOLERANCE = 0.01
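# With these values, any PR build measured more than 1% slower than the
# facebook:dev baseline, for either compression or decompression, is flagged.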


def get_new_open_pr_builds(prev_state=True):
    prev_prs = None
    if os.path.exists(PREVIOUS_PRS_FILENAME):
        with open(PREVIOUS_PRS_FILENAME, "rb") as f:
            prev_prs = pk.load(f)
    data = json.loads(urllib.request.urlopen(GITHUB_API_PR_URL).read().decode("utf-8"))
    prs = {
        d["url"]: {
            "user": d["user"]["login"],
            "branch": d["head"]["ref"],
            "hash": d["head"]["sha"].strip(),
        }
        for d in data
    }
    with open(PREVIOUS_PRS_FILENAME, "wb") as f:
        pk.dump(prs, f)
    if not prev_state or prev_prs is None:
        return list(prs.values())
    return [pr for url, pr in prs.items() if url not in prev_prs or prev_prs[url] != pr]
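
# Illustrative shape of what get_new_open_pr_builds() returns (values are
# hypothetical, not real PR data):
#   [{"user": "someuser", "branch": "fix-speed", "hash": "abc123..."}]
# With prev_state=True, only PRs that are new or whose head commit moved
# since the last pickled snapshot are included.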


def get_latest_hashes():
    tmp = subprocess.run(["git", "log", "-1"], stdout=subprocess.PIPE).stdout.decode(
        "utf-8"
    )
    sha1 = tmp.split("\n")[0].split(" ")[1]
    tmp = subprocess.run(
        ["git", "show", "{}^1".format(sha1)], stdout=subprocess.PIPE
    ).stdout.decode("utf-8")
    sha2 = tmp.split("\n")[0].split(" ")[1]
    # {}^2 only exists for merge commits; for ordinary commits `git show`
    # prints nothing to stdout, hence the empty-output guard below
    tmp = subprocess.run(
        ["git", "show", "{}^2".format(sha1)], stdout=subprocess.PIPE
    ).stdout.decode("utf-8")
    sha3 = "" if len(tmp) == 0 else tmp.split("\n")[0].split(" ")[1]
    return [sha1.strip(), sha2.strip(), sha3.strip()]


def get_builds_for_latest_hash():
    hashes = get_latest_hashes()
    for b in get_new_open_pr_builds(False):
        if b["hash"] in hashes:
            return [b]
    return []


def clone_and_build(build):
    if build["user"] is not None:
        github_url = GITHUB_URL_TEMPLATE.format(build["user"])
        checkout_command = (
            "git checkout {} &&".format(build["hash"]) if build["hash"] is not None else ""
        )
        os.system(
            """
            rm -rf zstd-{user}-{sha} &&
            git clone {github_url} zstd-{user}-{sha} &&
            cd zstd-{user}-{sha} &&
            {checkout_command}
            make -j &&
            cd ../
            """.format(user=build["user"], sha=build["hash"], github_url=github_url, checkout_command=checkout_command)
        )
        return "zstd-{user}-{sha}/zstd".format(user=build["user"], sha=build["hash"])
    # no user given: rebuild the zstd binary in the current checkout in place
    os.system("cd ../ && make -j && cd tests")
    return "../zstd"


def parse_benchmark_output(output):
    idx = [i for i, d in enumerate(output) if d == "MB/s"]
    return [float(output[idx[0] - 1]), float(output[idx[1] - 1])]
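
# parse_benchmark_output() keys off the two "MB/s" tokens in zstd's `-b`
# report. An illustrative token stream (exact fields and spacing vary across
# zstd versions):
#   1#file.txt : 10000000 -> 3433889 (2.912) , 312.3 MB/s , 723.8 MB/s
# The floats immediately preceding each "MB/s" token (312.3 and 723.8) are
# returned as [cspeed, dspeed].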


def benchmark_single(executable, level, filename):
    return parse_benchmark_output(
        subprocess.run(
            [executable, "-qb{}".format(level), filename],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ).stdout.decode("utf-8").split(" ")
    )


def benchmark_n(executable, level, filename, n):
    # take the best of n runs to reduce measurement noise
    speeds_arr = [benchmark_single(executable, level, filename) for _ in range(n)]
    cspeed, dspeed = max(b[0] for b in speeds_arr), max(b[1] for b in speeds_arr)
    print(
        "Bench (executable={} level={} filename={}, iterations={}):\n\t[cspeed: {} MB/s, dspeed: {} MB/s]".format(
            os.path.basename(executable),
            level,
            os.path.basename(filename),
            n,
            cspeed,
            dspeed,
        )
    )
    return (cspeed, dspeed)


def benchmark(build, filenames, levels, iterations):
    executable = clone_and_build(build)
    return [
        [benchmark_n(executable, l, f, iterations) for f in filenames] for l in levels
    ]


def benchmark_dictionary_single(executable, filenames_directory, dictionary_filename, level, iterations):
    cspeeds, dspeeds = [], []
    for _ in range(iterations):
        output = subprocess.run(
            [executable, "-qb{}".format(level), "-D", dictionary_filename, "-r", filenames_directory],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").split(" ")
        cspeed, dspeed = parse_benchmark_output(output)
        cspeeds.append(cspeed)
        dspeeds.append(dspeed)
    max_cspeed, max_dspeed = max(cspeeds), max(dspeeds)
    print(
        "Bench (executable={} level={} filenames_directory={}, dictionary_filename={}, iterations={}):\n\t[cspeed: {} MB/s, dspeed: {} MB/s]".format(
            os.path.basename(executable),
            level,
            os.path.basename(filenames_directory),
            os.path.basename(dictionary_filename),
            iterations,
            max_cspeed,
            max_dspeed,
        )
    )
    return (max_cspeed, max_dspeed)


def benchmark_dictionary(build, filenames_directory, dictionary_filename, levels, iterations):
    executable = clone_and_build(build)
    return [
        benchmark_dictionary_single(executable, filenames_directory, dictionary_filename, l, iterations)
        for l in levels
    ]
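
# A hedged usage sketch (the build dict and paths are hypothetical):
# benchmarking the dev branch with a dictionary across levels 1-3 yields one
# (cspeed, dspeed) tuple per level, e.g.
#   benchmark_dictionary(
#       {"user": "facebook", "branch": "dev", "hash": None},
#       "golden-compression", "dictionary", [1, 2, 3], 3)
#   # -> [(312.3, 723.8), (298.7, 741.0), (275.2, 750.4)]  (illustrative numbers)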


def parse_regressions_and_labels(old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build):
    # regression is the fractional slowdown relative to the baseline speed
    cspeed_reg = (old_cspeed - new_cspeed) / old_cspeed
    dspeed_reg = (old_dspeed - new_dspeed) / old_dspeed
    baseline_label = "{}:{} ({})".format(
        baseline_build["user"], baseline_build["branch"], baseline_build["hash"]
    )
    test_label = "{}:{} ({})".format(
        test_build["user"], test_build["branch"], test_build["hash"]
    )
    return cspeed_reg, dspeed_reg, baseline_label, test_label
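
# Worked example of the regression math (numbers are hypothetical): with a
# baseline old_cspeed of 300.0 MB/s and a test-build new_cspeed of 294.0 MB/s,
#   cspeed_reg = (300.0 - 294.0) / 300.0 = 0.02
# which exceeds CSPEED_REGRESSION_TOLERANCE (0.01), so the build is reported
# as a 2.00% compression regression.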


def get_regressions(baseline_build, test_build, iterations, filenames, levels):
    old = benchmark(baseline_build, filenames, levels, iterations)
    new = benchmark(test_build, filenames, levels, iterations)
    regressions = []
    for j, level in enumerate(levels):
        for k, filename in enumerate(filenames):
            old_cspeed, old_dspeed = old[j][k]
            new_cspeed, new_dspeed = new[j][k]
            cspeed_reg, dspeed_reg, baseline_label, test_label = parse_regressions_and_labels(
                old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build
            )
            if cspeed_reg > CSPEED_REGRESSION_TOLERANCE:
                regressions.append(
                    "[COMPRESSION REGRESSION] (level={} filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                        level, filename, baseline_label, test_label,
                        old_cspeed, new_cspeed, cspeed_reg * 100.0,
                    )
                )
            if dspeed_reg > DSPEED_REGRESSION_TOLERANCE:
                regressions.append(
                    "[DECOMPRESSION REGRESSION] (level={} filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                        level, filename, baseline_label, test_label,
                        old_dspeed, new_dspeed, dspeed_reg * 100.0,
                    )
                )
    return regressions


def get_regressions_dictionary(baseline_build, test_build, filenames_directory, dictionary_filename, levels, iterations):
    old = benchmark_dictionary(baseline_build, filenames_directory, dictionary_filename, levels, iterations)
    new = benchmark_dictionary(test_build, filenames_directory, dictionary_filename, levels, iterations)
    regressions = []
    for j, level in enumerate(levels):
        old_cspeed, old_dspeed = old[j]
        new_cspeed, new_dspeed = new[j]
        cspeed_reg, dspeed_reg, baseline_label, test_label = parse_regressions_and_labels(
            old_cspeed, new_cspeed, old_dspeed, new_dspeed, baseline_build, test_build
        )
        if cspeed_reg > CSPEED_REGRESSION_TOLERANCE:
            regressions.append(
                "[COMPRESSION REGRESSION] (level={} filenames_directory={} dictionary_filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                    level, filenames_directory, dictionary_filename, baseline_label, test_label, old_cspeed, new_cspeed, cspeed_reg * 100.0,
                )
            )
        if dspeed_reg > DSPEED_REGRESSION_TOLERANCE:
            regressions.append(
                "[DECOMPRESSION REGRESSION] (level={} filenames_directory={} dictionary_filename={})\n\t{} -> {}\n\t{} -> {} ({:0.2f}%)".format(
                    level, filenames_directory, dictionary_filename, baseline_label, test_label, old_dspeed, new_dspeed, dspeed_reg * 100.0,
                )
            )
    return regressions


def main(filenames, levels, iterations, builds=None, emails=None, continuous=False, frequency=DEFAULT_MAX_API_CALL_FREQUENCY_SEC, dictionary_filename=None):
    if builds is None:
        builds = get_new_open_pr_builds()
    while True:
        for test_build in builds:
            if dictionary_filename is None:
                regressions = get_regressions(
                    RELEASE_BUILD, test_build, iterations, filenames, levels
                )
            else:
                regressions = get_regressions_dictionary(
                    RELEASE_BUILD, test_build, filenames, dictionary_filename, levels, iterations
                )
            body = "\n".join(regressions)
            if len(regressions) > 0:
                if emails is not None:
                    os.system('echo "{}" | mutt -s "[zstd regression] caused by new pr" {}'.format(body, emails))
                    print("Emails sent to {}".format(emails))
                print(body)
        if not continuous:
            break
        # in continuous mode, wait and then pick up PRs opened or updated since
        time.sleep(frequency)
        builds = get_new_open_pr_builds()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--directory", help="directory with files to benchmark", default="golden-compression")
    parser.add_argument("--levels", help="levels to test e.g. ('1,2,3')", default="1")
    parser.add_argument("--iterations", help="number of benchmark iterations to run", default="1")
    parser.add_argument("--emails", help="email addresses of people who will be alerted upon regression. Only for continuous mode", default=None)
    parser.add_argument("--frequency", help="specifies the number of seconds to wait before each successive check for new PRs in continuous mode", default=DEFAULT_MAX_API_CALL_FREQUENCY_SEC)
    parser.add_argument("--mode", help="'fastmode', 'onetime', 'current', or 'continuous' (see README.md for details)", default="current")
    parser.add_argument("--dict", help="filename of dictionary to use (when set, this dictionary will be used to compress the files provided inside --directory)", default=None)

    args = parser.parse_args()
    filenames = args.directory
    levels = [int(l) for l in args.levels.split(",")]
    mode = args.mode
    iterations = int(args.iterations)
    emails = args.emails
    frequency = int(args.frequency)
    dictionary_filename = args.dict

    if dictionary_filename is None:
        filenames = glob.glob("{}/**".format(filenames))

    if len(filenames) == 0:
        print("0 files found")
        quit()

    if mode == "onetime":
        main(filenames, levels, iterations, frequency=frequency, dictionary_filename=dictionary_filename)
    elif mode == "current":
        builds = [{"user": None, "branch": "None", "hash": None}]
        main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
    elif mode == "fastmode":
        builds = [{"user": "facebook", "branch": "release", "hash": None}]
        main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
    else:
        main(filenames, levels, iterations, None, emails, True, frequency=frequency, dictionary_filename=dictionary_filename)
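
# Example invocations (modes are described in README.md; the email address is
# a placeholder):
#   python automated_benchmarking.py --mode current --levels 1,2,3 --iterations 3
#   python automated_benchmarking.py --mode continuous --emails dev@example.com
# Email alerts shell out to `mutt`, so continuous mode with --emails assumes
# mutt is installed and configured on the host.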