2 # ################################################################
3 # Copyright (c) Meta Platforms, Inc. and affiliates.
6 # This source code is licensed under both the BSD-style license (found in the
7 # LICENSE file in the root directory of this source tree) and the GPLv2 (found
8 # in the COPYING file in the root directory of this source tree).
9 # You may select, at your option, one of the above-listed licenses.
10 # ##########################################################################
# Set of file basenames skipped by exclude_file() during test discovery.
# NOTE(review): the closing brace and the entries of this literal are not
# visible in this excerpt — the listing appears truncated here.
49 EXCLUDED_BASENAMES = {
# Discovery filters: exclude_dir() is used to prune directories while walking
# the test tree (see the dirs[:] filter in get_all_tests()); exclude_file()
# filters individual candidate test basenames.
67 def exclude_dir(dirname: str) -> bool:
69 Should files under the directory :dirname: be excluded from the test runner?
# Membership test against the module-level EXCLUDED_DIRS set. The return
# statements of this function are not visible in this excerpt — presumably
# True on this branch and False otherwise; confirm against the full source.
71 if dirname in EXCLUDED_DIRS:
76 def exclude_file(filename: str) -> bool:
77 """Should the file :filename: be excluded from the test runner?"""
# A file is excluded either by exact basename match (EXCLUDED_BASENAMES) or
# by carrying one of the EXCLUDED_SUFFIXES; the return statements are elided
# in this excerpt.
78 if filename in EXCLUDED_BASENAMES:
80 for suffix in EXCLUDED_SUFFIXES:
81 if filename.endswith(suffix):
85 def read_file(filename: str) -> bytes:
86 """Reads the file :filename: and returns the contents as bytes."""
# Binary mode ("rb") so expectation files are compared byte-for-byte with the
# captured subprocess output; the `return f.read()` line is elided here.
87 with open(filename, "rb") as f:
91 def diff(a: bytes, b: bytes) -> str:
92 """Returns a diff between two different byte-strings :a: and :b:."""
# Writes both byte-strings to named temp files and shells out to the external
# `diff` tool (the fa/fb write+flush lines are elided in this excerpt).
# NOTE(review): passing NamedTemporaryFile.name to another process while the
# file is still open cannot reopen the file on Windows per the tempfile docs —
# presumably fine for the POSIX environments this runner targets; confirm.
94 with tempfile.NamedTemporaryFile("wb") as fa:
97 with tempfile.NamedTemporaryFile("wb") as fb:
# stderr is discarded; only diff's stdout is captured and decoded as UTF-8.
101 diff_bytes = subprocess.run(["diff", fa.name, fb.name], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout
102 return diff_bytes.decode("utf8")
26 # Line-oriented helpers used by glob_diff() below.
105 def pop_line(data: bytes) -> typing.Tuple[typing.Optional[bytes], bytes]:
107 Pop the first line from :data: and returns the first line and the remainder
108 of the data as a tuple. If :data: is empty, returns :(None, data):. Otherwise
109 the first line always ends in a :\n:, even if it is the last line and :data:
# Split on the first newline only; the popped line gets its terminator
# re-appended so callers can compare lines uniformly. The empty-input guard
# and single-part handling (original lines between the docstring and here)
# are elided in this excerpt.
117 parts = data.split(NEWLINE, maxsplit=1)
118 line = parts[0] + NEWLINE
122 return line, parts[1]
125 def glob_line_matches(actual: bytes, expect: bytes) -> bool:
127 Does the `actual` line match the expected glob line `expect`?
# Case-sensitive fnmatch-style glob on whitespace-stripped lines, so trailing
# newlines / spaces never affect matching.
129 return fnmatch.fnmatchcase(actual.strip(), expect.strip())
132 def glob_diff(actual: bytes, expect: bytes) -> bytes:
134 Returns None if the :actual: content matches the expected glob :expect:,
135 otherwise returns the diff bytes.
# Walk both byte-streams line-by-line using pop_line(); expect lines are
# globs matched via glob_line_matches().
138 actual_line, actual = pop_line(actual)
139 expect_line, expect = pop_line(expect)
141 # Handle end of file conditions - allow extra newlines
142 while expect_line is None and actual_line == b"\n":
143 actual_line, actual = pop_line(actual)
144 while actual_line is None and expect_line == b"\n":
145 expect_line, expect = pop_line(expect)
# Both streams exhausted => match (the success return on this branch is
# elided in this excerpt).
147 if expect_line is None and actual_line is None:
# One-sided leftovers: accumulate the surplus lines into the diff.
151 elif expect_line is None:
# NOTE(review): `!= None` should be `is not None` (PEP 8) — here and on the
# matching loop below.
153 while actual_line != None:
156 actual_line, actual = pop_line(actual)
158 elif actual_line is None:
160 while expect_line != None:
163 expect_line, expect = pop_line(expect)
166 assert expect_line is not None
167 assert actual_line is not None
# A literal "..." expectation line acts as a multi-line wildcard: skip actual
# lines until one matches the *next* expected glob line.
169 if expect_line == b'...\n':
170 next_expect_line, expect = pop_line(expect)
171 if next_expect_line is None:
175 while not glob_line_matches(actual_line, next_expect_line):
176 actual_line, actual = pop_line(actual)
# Ran out of actual lines while scanning for the post-"..." anchor; record
# the unmatched expectation in the diff.
177 if actual_line is None:
180 diff += next_expect_line
182 expect_line = next_expect_line
# Ordinary line: a glob mismatch is reported diff-style, '<' = expected,
# '>' = actual.
185 if not glob_line_matches(actual_line, expect_line):
187 diff += b'< ' + expect_line
188 diff += b'> ' + actual_line
190 actual_line, actual = pop_line(actual)
191 expect_line, expect = pop_line(expect)
195 """Options configuring how to run a :TestCase:."""
198 env: typing.Dict[str, str],
199 timeout: typing.Optional[int],
204 set_exact_output: bool,
207 self.timeout = timeout
208 self.verbose = verbose
209 self.preserve = preserve
210 self.scratch_dir = scratch_dir
211 self.test_dir = test_dir
212 self.set_exact_output = set_exact_output
# The `class TestCase:` header and the docstring's opening quotes are elided
# in this excerpt; the lines below are the class docstring text.
217 Logic and state related to running a single test case.
219 1. Initialize the test case.
220 2. Launch the test case with :TestCase.launch():.
221 This will start the test execution in a subprocess, but
222 not wait for completion. So you could launch multiple test
223 cases in parallel. This will now print any test output.
224 3. Analyze the results with :TestCase.analyze():. This will
225 join the test subprocess, check the results against the
226 expectations, and print the results to stdout.
228 :TestCase.run(): is also provided which combines the launch & analyze
229 steps for single-threaded use-cases.
231 All other methods, prefixed with _, are private helper functions.
233 def __init__(self, test_filename: str, options: Options) -> None:
235 Initialize the :TestCase: for the test located in :test_filename:
236 with the given :options:.
# self._opts is read below but its assignment (presumably
# `self._opts = options`) is elided in this excerpt.
239 self._test_file = test_filename
# Unique display name: the test's path relative to the configured test_dir.
240 self._test_name = os.path.normpath(
241 os.path.relpath(test_filename, start=self._opts.test_dir)
245 self._test_stdin = None
# Per-test scratch directory mirrors the test name under opts.scratch_dir.
246 self._scratch_dir = os.path.abspath(os.path.join(self._opts.scratch_dir, self._test_name))
# name is referenced as `test_case.name` (no call parens) in run_tests(), so
# it is presumably decorated with @property on an elided line — confirm.
249 def name(self) -> str:
250 """Returns the unique name for the test."""
251 return self._test_name
253 def launch(self) -> None:
255 Launch the test case as a subprocess, but do not block on completion.
# The body of launch() (presumably delegating to _launch_test()) is elided
# in this excerpt.
256 This allows users to run multiple tests in parallel. Results aren't yet
261 def analyze(self) -> bool:
263 Must be called after :TestCase.launch():. Joins the test subprocess and
264 checks the results against expectations. Finally prints the results to
265 stdout and returns the success.
# The join and the individual _check_* calls preceding this line are elided
# in this excerpt; _analyze_results() aggregates and prints, setting
# self._succeeded.
271 self._analyze_results()
272 return self._succeeded
274 def run(self) -> bool:
275 """Shorthand for combining both :TestCase.launch(): and :TestCase.analyze():."""
# The self.launch() call preceding this return is elided in this excerpt.
277 return self.analyze()
# Thin print wrappers so all test output goes through one place (stdout).
279 def _log(self, *args, **kwargs) -> None:
280 """Logs test output."""
281 print(file=sys.stdout, *args, **kwargs)
283 def _vlog(self, *args, **kwargs) -> None:
284 """Logs verbose test output."""
# Only emitted when the --verbose option was passed.
285 if self._opts.verbose:
286 print(file=sys.stdout, *args, **kwargs)
288 def _test_environment(self) -> typing.Dict[str, str]:
290 Returns the environment to be used for the
293 # We want to omit ZSTD cli flags so tests will be consistent across environments
# Start from os.environ minus every ZSTD* variable, then overlay the
# options-provided variables (the `env[k] = v` assignment and the final
# `return env` are elided in this excerpt).
294 env = {k: v for k, v in os.environ.items() if not k.startswith("ZSTD")}
295 for k, v in self._opts.env.items():
# Log each injected variable in shell-ish form when verbose.
296 self._vlog(f"${k}='{v}'")
300 def _launch_test(self) -> None:
301 """Launch the test subprocess, but do not join it."""
302 args = [os.path.abspath(self._test_file)]
# If a sibling `<test>.stdin` file exists, it is fed to the test's stdin;
# the handle is kept on self so _join_test() can close it later.
303 stdin_name = f"{self._test_file}.stdin"
304 if os.path.exists(stdin_name):
305 self._test_stdin = open(stdin_name, "rb")
306 stdin = self._test_stdin
# The `else:` introducing this branch is elided in this excerpt.
308 stdin = subprocess.DEVNULL
# Tests run inside their private scratch dir with the sanitized environment.
309 cwd = self._scratch_dir
310 env = self._test_environment()
# Several Popen keyword arguments between this call and stderr= are elided
# in this excerpt (presumably args/cwd/env/stdin wiring).
311 self._test_process = subprocess.Popen(
316 stderr=subprocess.PIPE,
317 stdout=subprocess.PIPE
320 def _join_test(self) -> None:
321 """Join the test process and save stderr, stdout, and the exit code."""
# Blocks until the test exits or the configured timeout elapses
# (communicate() raises subprocess.TimeoutExpired in the latter case).
322 (stdout, stderr) = self._test_process.communicate(timeout=self._opts.timeout)
324 self._output["stdout"] = stdout
325 self._output["stderr"] = stderr
326 self._exit_code = self._test_process.returncode
# Drop the process handle so a second join is impossible by construction.
327 self._test_process = None
# Close the optional stdin file opened by _launch_test().
328 if self._test_stdin is not None:
329 self._test_stdin.close()
330 self._test_stdin = None
332 def _check_output_exact(self, out_name: str, expected: bytes, exact_name: str) -> None:
334 Check the output named :out_name: for an exact match against the :expected: content.
335 Saves the success and message.
337 check_name = f"check_{out_name}"
338 actual = self._output[out_name]
# Byte-for-byte comparison; on mismatch the message embeds the output of the
# module-level diff() helper.
339 if actual == expected:
340 self._success[check_name] = True
341 self._message[check_name] = f"{out_name} matches!"
# The `else:` introducing this branch is elided in this excerpt.
343 self._success[check_name] = False
344 self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{diff(expected, actual)}"
# --set-exact-output mode: overwrite the .exact expectation file with the
# actual output (the f.write(actual) line is elided in this excerpt).
346 if self._opts.set_exact_output:
347 with open(exact_name, "wb") as f:
350 def _check_output_glob(self, out_name: str, expected: bytes) -> None:
352 Check the output named :out_name: for a glob match against the :expected: glob.
353 Saves the success and message.
355 check_name = f"check_{out_name}"
356 actual = self._output[out_name]
# NOTE(review): this local `diff` shadows the module-level diff() helper
# defined earlier in the file; a name like `diff_bytes` would be clearer.
357 diff = glob_diff(actual, expected)
# glob_diff() returns None on a match; the `if diff is None:` guard
# introducing this branch is elided in this excerpt.
359 self._success[check_name] = True
360 self._message[check_name] = f"{out_name} matches!"
362 utf8_diff = diff.decode('utf8')
363 self._success[check_name] = False
364 self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{utf8_diff}"
366 def _check_output(self, out_name: str) -> None:
368 Checks the output named :out_name: for a match against the expectation.
369 We check for a .exact, .glob, and a .ignore file. If none are found we
370 expect that the output should be empty.
372 If :Options.preserve: was set then we save the scratch directory and
373 save the stderr, stdout, and exit code to the scratch directory for
376 if self._opts.preserve:
377 # Save the output to the scratch directory
378 actual_name = os.path.join(self._scratch_dir, f"{out_name}")
379 with open(actual_name, "wb") as f:
380 f.write(self._output[out_name])
# Expectation files live next to the test file, named <test>.<stream>.<kind>.
382 exact_name = f"{self._test_file}.{out_name}.exact"
383 glob_name = f"{self._test_file}.{out_name}.glob"
384 ignore_name = f"{self._test_file}.{out_name}.ignore"
# Dispatch by whichever expectation file exists: .exact wins over .glob.
386 if os.path.exists(exact_name):
387 return self._check_output_exact(out_name, read_file(exact_name), exact_name)
388 elif os.path.exists(glob_name):
389 return self._check_output_glob(out_name, read_file(glob_name))
# .ignore (and the empty-output fallback) handling between here and this
# unconditional-pass branch is partially elided in this excerpt.
391 check_name = f"check_{out_name}"
392 self._success[check_name] = True
393 self._message[check_name] = f"{out_name} ignored!"
# Convenience wrappers: one per captured stream, both delegating to
# _check_output() with the stream name.
395 def _check_stderr(self) -> None:
396 """Checks the stderr output against the expectation."""
397 self._check_output("stderr")
399 def _check_stdout(self) -> None:
400 """Checks the stdout output against the expectation."""
401 self._check_output("stdout")
403 def _check_exit(self) -> None:
405 Checks the exit code against expectations. If a .exit file
406 exists, we expect that the exit code matches the contents.
407 Otherwise we expect the exit code to be zero.
409 If :Options.preserve: is set we save the exit code to the
410 scratch directory under the filename "exit".
412 if self._opts.preserve:
413 exit_name = os.path.join(self._scratch_dir, "exit")
414 with open(exit_name, "w") as f:
415 f.write(str(self._exit_code) + "\n")
# `exit_name` is deliberately reassigned: above it was the preserved copy,
# below it is the expectation file next to the test.
416 exit_name = f"{self._test_file}.exit"
417 if os.path.exists(exit_name):
# int() tolerates the trailing newline in the .exit file's bytes.
418 exit_code: int = int(read_file(exit_name))
# The `else: exit_code = 0` default branch is elided in this excerpt.
421 if exit_code == self._exit_code:
422 self._success["check_exit"] = True
423 self._message["check_exit"] = "Exit code matches!"
# The `else:` introducing this branch is elided in this excerpt.
425 self._success["check_exit"] = False
426 self._message["check_exit"] = f"Exit code mismatch! Expected {exit_code} but got {self._exit_code}"
428 def _analyze_results(self) -> None:
430 After all tests have been checked, collect all the successes
431 and messages, and print the results to stdout.
433 STATUS = {True: "PASS", False: "FAIL"}
# Sorted for deterministic output ordering across runs.
434 checks = sorted(self._success.keys())
# The test passes only if every individual check passed.
435 self._succeeded = all(self._success.values())
436 self._log(f"{STATUS[self._succeeded]}: {self._test_name}")
# Per-check details are printed on failure or in verbose mode; the
# `for check in checks:` loop header is elided in this excerpt.
438 if not self._succeeded or self._opts.verbose:
440 if self._opts.verbose or not self._success[check]:
441 self._log(f"{STATUS[self._success[check]]}: {self._test_name}.{check}")
442 self._log(self._message[check])
444 self._log("----------------------------------------")
# The `class TestSuite:` header and docstring quotes are elided in this
# excerpt; the next three lines are the class docstring text.
449 Setup & teardown test suite & cases.
450 This class is intended to be used as a context manager.
452 TODO: Make setup/teardown failure emit messages, not throw exceptions.
454 def __init__(self, test_directory: str, options: Options) -> None:
# self._opts is read below but its assignment (presumably
# `self._opts = options`) is elided in this excerpt.
456 self._test_dir = os.path.abspath(test_directory)
457 rel_test_dir = os.path.relpath(test_directory, start=self._opts.test_dir)
# Guard: the suite directory must live inside opts.test_dir.
458 assert not rel_test_dir.startswith(os.path.sep)
459 self._scratch_dir = os.path.normpath(os.path.join(self._opts.scratch_dir, rel_test_dir))
# Context-manager protocol: __enter__'s body (presumably _setup_once() and
# `return self`) is elided in this excerpt; __exit__ always tears down.
461 def __enter__(self) -> 'TestSuite':
465 def __exit__(self, _exc_type, _exc_value, _traceback) -> None:
466 self._teardown_once()
468 @contextlib.contextmanager
469 def test_case(self, test_basename: str) -> TestCase:
471 Context manager for a test case in the test suite.
472 Pass the basename of the test relative to the :test_directory:.
# Guard: callers must pass a bare basename, not a path.
474 assert os.path.dirname(test_basename) == ""
# Per-test setup/teardown bracket the yielded TestCase; the try:/finally:
# lines around the yield are elided in this excerpt — presumably teardown
# runs in a finally block so it fires even when the body raises; confirm.
476 self._setup(test_basename)
477 test_filename = os.path.join(self._test_dir, test_basename)
478 yield TestCase(test_filename, self._opts)
480 self._teardown(test_basename)
# NOTE(review): the parameter `dir` shadows the builtin of the same name;
# a rename (e.g. `dirname`) would be cleaner. Also note asserts vanish
# under `python -O`, so these sanity guards are best-effort only.
482 def _remove_scratch_dir(self, dir: str) -> None:
483 """Helper to remove a scratch directory with sanity checks"""
# Triple guard before deleting: path must mention "scratch", must be inside
# this suite's scratch dir, and must exist. The actual removal call
# (presumably shutil.rmtree) is elided in this excerpt.
484 assert "scratch" in dir
485 assert dir.startswith(self._scratch_dir)
486 assert os.path.exists(dir)
# Suite-level lifecycle: runs once per suite, around all its test cases.
489 def _setup_once(self) -> None:
# Start from a clean scratch dir, then run the optional `setup_once` script
# from the suite directory inside it.
490 if os.path.exists(self._scratch_dir):
491 self._remove_scratch_dir(self._scratch_dir)
492 os.makedirs(self._scratch_dir)
493 setup_script = os.path.join(self._test_dir, "setup_once")
494 if os.path.exists(setup_script):
495 self._run_script(setup_script, cwd=self._scratch_dir)
497 def _teardown_once(self) -> None:
498 assert os.path.exists(self._scratch_dir)
# Run the optional `teardown_once` script, then delete the scratch dir
# unless --preserve asked to keep it for debugging.
499 teardown_script = os.path.join(self._test_dir, "teardown_once")
500 if os.path.exists(teardown_script):
501 self._run_script(teardown_script, cwd=self._scratch_dir)
502 if not self._opts.preserve:
503 self._remove_scratch_dir(self._scratch_dir)
# Per-test lifecycle: mirrors _setup_once/_teardown_once but scoped to one
# test case's private scratch subdirectory.
505 def _setup(self, test_basename: str) -> None:
506 test_scratch_dir = os.path.join(self._scratch_dir, test_basename)
# The per-test dir must not already exist — _teardown (or _setup_once's
# clean slate) should have removed it.
507 assert not os.path.exists(test_scratch_dir)
508 os.makedirs(test_scratch_dir)
509 setup_script = os.path.join(self._test_dir, "setup")
510 if os.path.exists(setup_script):
511 self._run_script(setup_script, cwd=test_scratch_dir)
513 def _teardown(self, test_basename: str) -> None:
514 test_scratch_dir = os.path.join(self._scratch_dir, test_basename)
515 assert os.path.exists(test_scratch_dir)
516 teardown_script = os.path.join(self._test_dir, "teardown")
517 if os.path.exists(teardown_script):
518 self._run_script(teardown_script, cwd=test_scratch_dir)
# Keep the per-test scratch dir only when --preserve was requested.
519 if not self._opts.preserve:
520 self._remove_scratch_dir(test_scratch_dir)
522 def _run_script(self, script: str, cwd: str) -> None:
# Scripts get the full os.environ plus the options-provided overrides
# (the `env[k] = v` assignment and the subprocess.run(...) invocation with
# its check=/env=/cwd= arguments are elided in this excerpt).
523 env = copy.copy(os.environ)
524 for k, v in self._opts.env.items():
529 stdin=subprocess.DEVNULL,
530 stdout=subprocess.PIPE,
531 stderr=subprocess.PIPE,
# Failure of a setup/teardown script is reported with its captured output;
# the re-raise (if any) after these prints is elided in this excerpt.
536 except subprocess.CalledProcessError as e:
537 print(f"{script} failed with exit code {e.returncode}!")
538 print(f"stderr:\n{e.stderr}")
539 print(f"stdout:\n{e.stdout}")
# Type alias: maps a test-suite directory path to the list of test-case
# basenames discovered (or requested) within it.
542 TestSuites = typing.Dict[str, typing.List[str]]
544 def get_all_tests(options: Options) -> TestSuites:
546 Find all the test in the test directory and return the test suites.
# Walk the test tree top-down; the in-place dirs[:] assignment prunes
# excluded directories so os.walk() never descends into them.
549 for root, dirs, files in os.walk(options.test_dir, topdown=True):
550 dirs[:] = [d for d in dirs if not exclude_dir(d)]
# The test_suites/test_cases initialization and the `for file in files:`
# loop header are elided in this excerpt.
553 if not exclude_file(file):
554 test_cases.append(file)
# Keys must be normalized paths so lookups elsewhere agree.
555 assert root == os.path.normpath(root)
556 test_suites[root] = test_cases
560 def resolve_listed_tests(
561 tests: typing.List[str], options: Options
564 Resolve the list of tests passed on the command line into their
565 respective test suites. Tests can either be paths, or test names
566 relative to the test directory.
# For each requested test (the `for test in tests:` header is elided in this
# excerpt): try the name as given, then relative to test_dir, else fail.
570 if not os.path.exists(test):
571 test = os.path.join(options.test_dir, test)
572 if not os.path.exists(test):
573 raise RuntimeError(f"Test {test} does not exist!")
# Canonicalize so the suite key matches get_all_tests()' normalized roots.
575 test = os.path.normpath(os.path.abspath(test))
576 assert test.startswith(options.test_dir)
# Suite = containing directory; case = basename; group cases per suite.
577 test_suite = os.path.dirname(test)
578 test_case = os.path.basename(test)
579 test_suites.setdefault(test_suite, []).append(test_case)
583 def run_tests(test_suites: TestSuites, options: Options) -> bool:
585 Runs all the test in the :test_suites: with the given :options:.
586 Prints the results to stdout.
# One TestSuite context (setup_once/teardown_once) per suite directory;
# tests within a suite are deduplicated and run in sorted order.
589 for test_dir, test_files in test_suites.items():
590 with TestSuite(test_dir, options) as test_suite:
591 test_files = sorted(set(test_files))
592 for test_file in test_files:
593 with test_suite.test_case(test_file) as test_case:
# Collects name -> pass/fail; the `tests = {}` initialization and the
# success-counting lines of the summary below are elided in this excerpt.
594 tests[test_case.name] = test_case.run()
597 for test, status in tests.items():
601 print(f"FAIL: {test}")
602 if successes == len(tests):
603 print(f"PASSED all {len(tests)} tests!")
# The `else:` and the final return of the overall success flag are elided
# in this excerpt.
606 print(f"FAILED {len(tests) - successes} / {len(tests)} tests!")
# Populate TEST_DIR/bin/symlinks with the standard zstd alias names
# (ZSTD_SYMLINKS is defined elsewhere in the file) all pointing at the
# zstd binary under test.
610 def setup_zstd_symlink_dir(zstd_symlink_dir: str, zstd: str) -> None:
# Sanity guard against creating symlinks outside the expected directory.
611 assert os.path.join("bin", "symlinks") in zstd_symlink_dir
612 if not os.path.exists(zstd_symlink_dir):
613 os.makedirs(zstd_symlink_dir)
614 for symlink in ZSTD_SYMLINKS:
615 path = os.path.join(zstd_symlink_dir, symlink)
# Stale links are handled first; the removal call (presumably os.remove)
# on this branch is elided in this excerpt.
616 if os.path.exists(path):
618 os.symlink(zstd, path)
620 if __name__ == "__main__":
621 CLI_TEST_DIR = os.path.dirname(sys.argv[0])
622 REPO_DIR = os.path.join(CLI_TEST_DIR, "..", "..")
623 PROGRAMS_DIR = os.path.join(REPO_DIR, "programs")
624 TESTS_DIR = os.path.join(REPO_DIR, "tests")
625 ZSTD_PATH = os.path.join(PROGRAMS_DIR, "zstd")
626 ZSTDGREP_PATH = os.path.join(PROGRAMS_DIR, "zstdgrep")
627 ZSTDLESS_PATH = os.path.join(PROGRAMS_DIR, "zstdless")
628 DATAGEN_PATH = os.path.join(TESTS_DIR, "datagen")
630 parser = argparse.ArgumentParser(
632 "Runs the zstd CLI tests. Exits nonzero on failure. Default arguments are\n"
633 "generally correct. Pass --preserve to preserve test output for debugging,\n"
634 "and --verbose to get verbose test output.\n"
640 help="Preserve the scratch directory TEST_DIR/scratch/ for debugging purposes."
642 parser.add_argument("--verbose", action="store_true", help="Verbose test output.")
643 parser.add_argument("--timeout", default=200, type=int, help="Test case timeout in seconds. Set to 0 to disable timeouts.")
647 help="Sets the EXEC_PREFIX environment variable. Prefix to invocations of the zstd CLI."
652 help="Sets the ZSTD_BIN environment variable. Path of the zstd CLI."
656 default=ZSTDGREP_PATH,
657 help="Sets the ZSTDGREP_BIN environment variable. Path of the zstdgrep CLI."
661 default=ZSTDLESS_PATH,
662 help="Sets the ZSTDLESS_BIN environment variable. Path of the zstdless CLI."
666 default=DATAGEN_PATH,
667 help="Sets the DATAGEN_BIN environment variable. Path to the datagen CLI."
671 default=CLI_TEST_DIR,
673 "Runs the tests under this directory. "
674 "Adds TEST_DIR/bin/ to path. "
675 "Scratch directory located in TEST_DIR/scratch/."
679 "--set-exact-output",
681 help="Set stderr.exact and stdout.exact for all failing tests, unless .ignore or .glob already exists"
686 help="Run only these test cases. Can either be paths or test names relative to TEST_DIR/"
688 args = parser.parse_args()
690 if args.timeout <= 0:
693 args.test_dir = os.path.normpath(os.path.abspath(args.test_dir))
694 bin_dir = os.path.abspath(os.path.join(args.test_dir, "bin"))
695 zstd_symlink_dir = os.path.join(bin_dir, "symlinks")
696 scratch_dir = os.path.join(args.test_dir, "scratch")
698 setup_zstd_symlink_dir(zstd_symlink_dir, os.path.abspath(args.zstd))
701 if args.exec_prefix is not None:
702 env["EXEC_PREFIX"] = args.exec_prefix
703 env["ZSTD_SYMLINK_DIR"] = zstd_symlink_dir
704 env["ZSTD_REPO_DIR"] = os.path.abspath(REPO_DIR)
705 env["DATAGEN_BIN"] = os.path.abspath(args.datagen)
706 env["ZSTDGREP_BIN"] = os.path.abspath(args.zstdgrep)
707 env["ZSTDLESS_BIN"] = os.path.abspath(args.zstdless)
708 env["COMMON"] = os.path.abspath(os.path.join(args.test_dir, "common"))
709 env["PATH"] = bin_dir + ":" + os.getenv("PATH", "")
714 timeout=args.timeout,
715 verbose=args.verbose,
716 preserve=args.preserve,
717 test_dir=args.test_dir,
718 scratch_dir=scratch_dir,
719 set_exact_output=args.set_exact_output,
722 if len(args.tests) == 0:
723 tests = get_all_tests(opts)
725 tests = resolve_listed_tests(args.tests, opts)
727 success = run_tests(tests, opts)