648db22b |
1 | #!/usr/bin/env python3 |
2 | # ################################################################ |
3 | # Copyright (c) Meta Platforms, Inc. and affiliates. |
4 | # All rights reserved. |
5 | # |
6 | # This source code is licensed under both the BSD-style license (found in the |
7 | # LICENSE file in the root directory of this source tree) and the GPLv2 (found |
8 | # in the COPYING file in the root directory of this source tree). |
9 | # You may select, at your option, one of the above-listed licenses. |
10 | # ########################################################################## |
11 | |
12 | import argparse |
13 | import contextlib |
14 | import copy |
15 | import fnmatch |
16 | import os |
17 | import shutil |
18 | import subprocess |
19 | import sys |
20 | import tempfile |
21 | import typing |
22 | |
23 | |
# Names of the symlinks the test harness creates, all pointing at the zstd
# binary. zstd inspects argv[0] to select its mode, so each alias exercises
# a different CLI personality (e.g. `unzstd`, `zcat`, `gzip`, ...).
ZSTD_SYMLINKS = [
    "zstd",
    "zstdmt",
    "unzstd",
    "zstdcat",
    "zcat",
    "gzip",
    "gunzip",
    "gzcat",
    "lzma",
    "unlzma",
    "xz",
    "unxz",
    "lz4",
    "unlz4",
]


# Directory basenames skipped during test discovery: "bin" holds helper
# binaries, "common" holds shared scripts, and "scratch" holds test output.
EXCLUDED_DIRS = {
    "bin",
    "common",
    "scratch",
}


# File basenames that are test-support files rather than test cases.
EXCLUDED_BASENAMES = {
    "setup",
    "setup_once",
    "teardown",
    "teardown_once",
    "README.md",
    "run.py",
    ".gitignore",
}

# Suffixes of per-test expectation/control files (consumed by
# TestCase._check_output and TestCase._check_exit), not test cases.
EXCLUDED_SUFFIXES = [
    ".exact",
    ".glob",
    ".ignore",
    ".exit",
]
65 | |
66 | |
def exclude_dir(dirname: str) -> bool:
    """
    Should files under the directory :dirname: be excluded from the test runner?
    """
    # Membership test returns the bool directly; no need for an if/else.
    return dirname in EXCLUDED_DIRS
74 | |
75 | |
def exclude_file(filename: str) -> bool:
    """Should the file :filename: be excluded from the test runner?"""
    if filename in EXCLUDED_BASENAMES:
        return True
    # Expectation/control files (.exact, .glob, .ignore, .exit) are not tests.
    return any(filename.endswith(suffix) for suffix in EXCLUDED_SUFFIXES)
84 | |
def read_file(filename: str) -> bytes:
    """Return the entire contents of :filename: as bytes."""
    with open(filename, "rb") as fileobj:
        contents = fileobj.read()
    return contents
89 | |
90 | |
def diff(a: bytes, b: bytes) -> str:
    """Returns a diff between two different byte-strings :a: and :b:."""
    assert a != b
    # Write both inputs to temporary files so the external `diff` tool can
    # compare them; the files are deleted when the context managers exit.
    with tempfile.NamedTemporaryFile("wb") as file_a, \
            tempfile.NamedTemporaryFile("wb") as file_b:
        file_a.write(a)
        file_a.flush()
        file_b.write(b)
        file_b.flush()

        result = subprocess.run(
            ["diff", file_a.name, file_b.name],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
    return result.stdout.decode("utf8")
103 | |
104 | |
def pop_line(data: bytes) -> typing.Tuple[typing.Optional[bytes], bytes]:
    """
    Pop the first line from :data: and returns the first line and the remainder
    of the data as a tuple. If :data: is empty, returns :(None, data):. Otherwise
    the first line always ends in a :\n:, even if it is the last line and :data:
    doesn't end in :\n:.
    """
    if not data:
        return (None, data)

    # partition() yields (head, sep, tail); when no newline is present the
    # tail is empty, so the whole input becomes the (terminated) first line.
    head, _sep, tail = data.partition(b"\n")
    return head + b"\n", tail
123 | |
124 | |
def glob_line_matches(actual: bytes, expect: bytes) -> bool:
    """
    Does the `actual` line match the expected glob line `expect`?
    Both sides are stripped of surrounding whitespace before matching.
    """
    pattern = expect.strip()
    candidate = actual.strip()
    return fnmatch.fnmatchcase(candidate, pattern)
130 | |
131 | |
def glob_diff(actual: bytes, expect: bytes) -> typing.Optional[bytes]:
    """
    Returns None if the :actual: content matches the expected glob :expect:,
    otherwise returns the diff bytes.

    The glob is matched line by line with fnmatch-style wildcards. A glob
    line consisting of "..." matches any number of actual lines, up to the
    next glob line that matches (or end of input).
    """
    diff = b''
    actual_line, actual = pop_line(actual)
    expect_line, expect = pop_line(expect)
    while True:
        # Handle end of file conditions - allow extra newlines
        while expect_line is None and actual_line == b"\n":
            actual_line, actual = pop_line(actual)
        while actual_line is None and expect_line == b"\n":
            expect_line, expect = pop_line(expect)

        if expect_line is None and actual_line is None:
            # Both exhausted: success iff no mismatch was recorded.
            if diff == b'':
                return None
            return diff
        elif expect_line is None:
            # Unexpected trailing actual lines: report them all.
            diff += b"---\n"
            while actual_line is not None:
                diff += b"> "
                diff += actual_line
                actual_line, actual = pop_line(actual)
            return diff
        elif actual_line is None:
            # Expected lines were never produced: report them all.
            diff += b"---\n"
            while expect_line is not None:
                diff += b"< "
                diff += expect_line
                expect_line, expect = pop_line(expect)
            return diff

        assert expect_line is not None
        assert actual_line is not None

        if expect_line == b'...\n':
            # "..." consumes actual lines until one matches the next glob line.
            next_expect_line, expect = pop_line(expect)
            if next_expect_line is None:
                # Trailing "..." matches everything that remains.
                if diff == b'':
                    return None
                return diff
            while not glob_line_matches(actual_line, next_expect_line):
                actual_line, actual = pop_line(actual)
                if actual_line is None:
                    # Ran out of input before satisfying the glob line.
                    diff += b"---\n"
                    diff += b"< "
                    diff += next_expect_line
                    return diff
            expect_line = next_expect_line
            continue

        if not glob_line_matches(actual_line, expect_line):
            diff += b'---\n'
            diff += b'< ' + expect_line
            diff += b'> ' + actual_line

        actual_line, actual = pop_line(actual)
        expect_line, expect = pop_line(expect)
191 | expect_line, expect = pop_line(expect) |
192 | |
193 | |
class Options:
    """Options configuring how to run a :TestCase:."""
    def __init__(
        self,
        env: typing.Dict[str, str],
        timeout: typing.Optional[int],
        verbose: bool,
        preserve: bool,
        scratch_dir: str,
        test_dir: str,
        set_exact_output: bool,
    ) -> None:
        """
        :param env: Extra environment variables applied on top of the inherited
            environment for test subprocesses and setup/teardown scripts.
        :param timeout: Per-test timeout in seconds, or None for no timeout.
        :param verbose: Print per-check results even for passing checks.
        :param preserve: Keep scratch directories and save stdout/stderr/exit
            code to them for debugging instead of cleaning up.
        :param scratch_dir: Root directory for per-test scratch directories.
        :param test_dir: Root directory the test suites live under.
        :param set_exact_output: Overwrite .exact expectation files with the
            actual test output (used to regenerate expectations).
        """
        self.env = env
        self.timeout = timeout
        self.verbose = verbose
        self.preserve = preserve
        self.scratch_dir = scratch_dir
        self.test_dir = test_dir
        self.set_exact_output = set_exact_output
213 | |
214 | |
class TestCase:
    """
    Logic and state related to running a single test case.

    1. Initialize the test case.
    2. Launch the test case with :TestCase.launch():.
       This will start the test execution in a subprocess, but
       not wait for completion. So you could launch multiple test
       cases in parallel. This will not print any test output.
    3. Analyze the results with :TestCase.analyze():. This will
       join the test subprocess, check the results against the
       expectations, and print the results to stdout.

    :TestCase.run(): is also provided which combines the launch & analyze
    steps for single-threaded use-cases.

    All other methods, prefixed with _, are private helper functions.
    """
    def __init__(self, test_filename: str, options: Options) -> None:
        """
        Initialize the :TestCase: for the test located in :test_filename:
        with the given :options:.
        """
        self._opts = options
        self._test_file = test_filename
        # Unique test name: the test file's path relative to the test dir.
        self._test_name = os.path.normpath(
            os.path.relpath(test_filename, start=self._opts.test_dir)
        )
        # Per-check pass/fail flags and human-readable messages, keyed by
        # check name (e.g. "check_exit", "check_stdout", "check_stderr").
        self._success: typing.Dict[str, bool] = {}
        self._message: typing.Dict[str, str] = {}
        # Open file handle for the optional TEST.stdin file; closed in
        # _join_test().
        self._test_stdin = None
        self._scratch_dir = os.path.abspath(os.path.join(self._opts.scratch_dir, self._test_name))

    @property
    def name(self) -> str:
        """Returns the unique name for the test."""
        return self._test_name

    def launch(self) -> None:
        """
        Launch the test case as a subprocess, but do not block on completion.
        This allows users to run multiple tests in parallel. Results aren't yet
        printed out.
        """
        self._launch_test()

    def analyze(self) -> bool:
        """
        Must be called after :TestCase.launch():. Joins the test subprocess and
        checks the results against expectations. Finally prints the results to
        stdout and returns the success.
        """
        self._join_test()
        self._check_exit()
        self._check_stderr()
        self._check_stdout()
        self._analyze_results()
        return self._succeeded

    def run(self) -> bool:
        """Shorthand for combining both :TestCase.launch(): and :TestCase.analyze():."""
        self.launch()
        return self.analyze()

    def _log(self, *args, **kwargs) -> None:
        """Logs test output."""
        print(file=sys.stdout, *args, **kwargs)

    def _vlog(self, *args, **kwargs) -> None:
        """Logs verbose test output (only when :Options.verbose: is set)."""
        if self._opts.verbose:
            print(file=sys.stdout, *args, **kwargs)

    def _test_environment(self) -> typing.Dict[str, str]:
        """
        Returns the environment to be used for the
        test subprocess.
        """
        # We want to omit ZSTD cli flags so tests will be consistent across environments
        env = {k: v for k, v in os.environ.items() if not k.startswith("ZSTD")}
        for k, v in self._opts.env.items():
            self._vlog(f"${k}='{v}'")
            env[k] = v
        return env

    def _launch_test(self) -> None:
        """Launch the test subprocess, but do not join it."""
        args = [os.path.abspath(self._test_file)]
        # If a TEST.stdin file exists next to the test, feed it to the
        # subprocess; otherwise the test gets no stdin.
        stdin_name = f"{self._test_file}.stdin"
        if os.path.exists(stdin_name):
            self._test_stdin = open(stdin_name, "rb")
            stdin = self._test_stdin
        else:
            stdin = subprocess.DEVNULL
        # Tests run inside their private scratch directory.
        cwd = self._scratch_dir
        env = self._test_environment()
        self._test_process = subprocess.Popen(
            args=args,
            stdin=stdin,
            cwd=cwd,
            env=env,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE
        )

    def _join_test(self) -> None:
        """Join the test process and save stderr, stdout, and the exit code."""
        # communicate() raises subprocess.TimeoutExpired if the test exceeds
        # the configured timeout.
        (stdout, stderr) = self._test_process.communicate(timeout=self._opts.timeout)
        self._output = {}
        self._output["stdout"] = stdout
        self._output["stderr"] = stderr
        self._exit_code = self._test_process.returncode
        self._test_process = None
        if self._test_stdin is not None:
            self._test_stdin.close()
            self._test_stdin = None

    def _check_output_exact(self, out_name: str, expected: bytes, exact_name: str) -> None:
        """
        Check the output named :out_name: for an exact match against the :expected: content.
        Saves the success and message.

        When :Options.set_exact_output: is set, the .exact file at
        :exact_name: is rewritten with the actual output (even on a match).
        """
        check_name = f"check_{out_name}"
        actual = self._output[out_name]
        if actual == expected:
            self._success[check_name] = True
            self._message[check_name] = f"{out_name} matches!"
        else:
            self._success[check_name] = False
            self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{diff(expected, actual)}"

        if self._opts.set_exact_output:
            with open(exact_name, "wb") as f:
                f.write(actual)

    def _check_output_glob(self, out_name: str, expected: bytes) -> None:
        """
        Check the output named :out_name: for a glob match against the :expected: glob.
        Saves the success and message.
        """
        check_name = f"check_{out_name}"
        actual = self._output[out_name]
        # Local name shadows the module-level diff() helper; here it holds
        # the glob diff bytes (or None on a match).
        diff = glob_diff(actual, expected)
        if diff is None:
            self._success[check_name] = True
            self._message[check_name] = f"{out_name} matches!"
        else:
            utf8_diff = diff.decode('utf8')
            self._success[check_name] = False
            self._message[check_name] = f"{out_name} does not match!\n> diff expected actual\n{utf8_diff}"

    def _check_output(self, out_name: str) -> None:
        """
        Checks the output named :out_name: for a match against the expectation.
        A TEST.{out_name}.exact file requires an exact match and a
        TEST.{out_name}.glob file requires a glob match; if neither exists
        the output is ignored.

        If :Options.preserve: was set then we save the scratch directory and
        save the stderr, stdout, and exit code to the scratch directory for
        debugging.
        """
        if self._opts.preserve:
            # Save the output to the scratch directory
            actual_name = os.path.join(self._scratch_dir, f"{out_name}")
            with open(actual_name, "wb") as f:
                f.write(self._output[out_name])

        exact_name = f"{self._test_file}.{out_name}.exact"
        glob_name = f"{self._test_file}.{out_name}.glob"
        # NOTE(review): ignore_name is computed but never consulted; any
        # output with neither a .exact nor a .glob file is ignored anyway.
        ignore_name = f"{self._test_file}.{out_name}.ignore"

        if os.path.exists(exact_name):
            return self._check_output_exact(out_name, read_file(exact_name), exact_name)
        elif os.path.exists(glob_name):
            return self._check_output_glob(out_name, read_file(glob_name))
        else:
            check_name = f"check_{out_name}"
            self._success[check_name] = True
            self._message[check_name] = f"{out_name} ignored!"

    def _check_stderr(self) -> None:
        """Checks the stderr output against the expectation."""
        self._check_output("stderr")

    def _check_stdout(self) -> None:
        """Checks the stdout output against the expectation."""
        self._check_output("stdout")

    def _check_exit(self) -> None:
        """
        Checks the exit code against expectations. If a .exit file
        exists, we expect that the exit code matches the contents.
        Otherwise we expect the exit code to be zero.

        If :Options.preserve: is set we save the exit code to the
        scratch directory under the filename "exit".
        """
        if self._opts.preserve:
            exit_name = os.path.join(self._scratch_dir, "exit")
            with open(exit_name, "w") as f:
                f.write(str(self._exit_code) + "\n")
        exit_name = f"{self._test_file}.exit"
        if os.path.exists(exit_name):
            # The .exit file contains the expected exit code as decimal text.
            exit_code: int = int(read_file(exit_name))
        else:
            exit_code: int = 0
        if exit_code == self._exit_code:
            self._success["check_exit"] = True
            self._message["check_exit"] = "Exit code matches!"
        else:
            self._success["check_exit"] = False
            self._message["check_exit"] = f"Exit code mismatch! Expected {exit_code} but got {self._exit_code}"

    def _analyze_results(self) -> None:
        """
        After all tests have been checked, collect all the successes
        and messages, and print the results to stdout.
        """
        STATUS = {True: "PASS", False: "FAIL"}
        checks = sorted(self._success.keys())
        # The test passes only if every individual check passed.
        self._succeeded = all(self._success.values())
        self._log(f"{STATUS[self._succeeded]}: {self._test_name}")

        # Print per-check details on failure, or always when verbose.
        if not self._succeeded or self._opts.verbose:
            for check in checks:
                if self._opts.verbose or not self._success[check]:
                    self._log(f"{STATUS[self._success[check]]}: {self._test_name}.{check}")
                    self._log(self._message[check])

        self._log("----------------------------------------")
445 | |
446 | |
class TestSuite:
    """
    Setup & teardown test suite & cases.
    This class is intended to be used as a context manager: entering runs the
    suite's optional setup_once script, exiting runs teardown_once.

    TODO: Make setup/teardown failure emit messages, not throw exceptions.
    """
    def __init__(self, test_directory: str, options: Options) -> None:
        self._opts = options
        self._test_dir = os.path.abspath(test_directory)
        # Mirror the suite's position under test_dir inside the scratch dir.
        rel_test_dir = os.path.relpath(test_directory, start=self._opts.test_dir)
        assert not rel_test_dir.startswith(os.path.sep)
        self._scratch_dir = os.path.normpath(os.path.join(self._opts.scratch_dir, rel_test_dir))

    def __enter__(self) -> 'TestSuite':
        self._setup_once()
        return self

    def __exit__(self, _exc_type, _exc_value, _traceback) -> None:
        self._teardown_once()

    @contextlib.contextmanager
    def test_case(self, test_basename: str) -> typing.Iterator[TestCase]:
        """
        Context manager for a test case in the test suite.
        Pass the basename of the test relative to the :test_directory:.
        Runs the suite's per-test setup script before yielding the
        :TestCase: and the teardown script afterwards.
        """
        assert os.path.dirname(test_basename) == ""
        try:
            self._setup(test_basename)
            test_filename = os.path.join(self._test_dir, test_basename)
            yield TestCase(test_filename, self._opts)
        finally:
            self._teardown(test_basename)

    def _remove_scratch_dir(self, dir: str) -> None:
        """Helper to remove a scratch directory with sanity checks"""
        # Guard against deleting anything outside the scratch tree.
        assert "scratch" in dir
        assert dir.startswith(self._scratch_dir)
        assert os.path.exists(dir)
        shutil.rmtree(dir)

    def _setup_once(self) -> None:
        # Start from a clean scratch directory, then run the optional
        # suite-level setup_once script inside it.
        if os.path.exists(self._scratch_dir):
            self._remove_scratch_dir(self._scratch_dir)
        os.makedirs(self._scratch_dir)
        setup_script = os.path.join(self._test_dir, "setup_once")
        if os.path.exists(setup_script):
            self._run_script(setup_script, cwd=self._scratch_dir)

    def _teardown_once(self) -> None:
        assert os.path.exists(self._scratch_dir)
        teardown_script = os.path.join(self._test_dir, "teardown_once")
        if os.path.exists(teardown_script):
            self._run_script(teardown_script, cwd=self._scratch_dir)
        # Keep the scratch directory around for debugging when --preserve.
        if not self._opts.preserve:
            self._remove_scratch_dir(self._scratch_dir)

    def _setup(self, test_basename: str) -> None:
        # Create a fresh per-test scratch dir and run the optional per-test
        # setup script inside it.
        test_scratch_dir = os.path.join(self._scratch_dir, test_basename)
        assert not os.path.exists(test_scratch_dir)
        os.makedirs(test_scratch_dir)
        setup_script = os.path.join(self._test_dir, "setup")
        if os.path.exists(setup_script):
            self._run_script(setup_script, cwd=test_scratch_dir)

    def _teardown(self, test_basename: str) -> None:
        test_scratch_dir = os.path.join(self._scratch_dir, test_basename)
        assert os.path.exists(test_scratch_dir)
        teardown_script = os.path.join(self._test_dir, "teardown")
        if os.path.exists(teardown_script):
            self._run_script(teardown_script, cwd=test_scratch_dir)
        if not self._opts.preserve:
            self._remove_scratch_dir(test_scratch_dir)

    def _run_script(self, script: str, cwd: str) -> None:
        # Setup/teardown scripts see the real environment plus the
        # harness-provided overrides.
        env = copy.copy(os.environ)
        for k, v in self._opts.env.items():
            env[k] = v
        try:
            # check=True raises CalledProcessError on nonzero exit.
            subprocess.run(
                args=[script],
                stdin=subprocess.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=cwd,
                env=env,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            print(f"{script} failed with exit code {e.returncode}!")
            print(f"stderr:\n{e.stderr}")
            print(f"stdout:\n{e.stdout}")
            raise
541 | |
# Mapping from a test-suite directory to the list of test-case basenames in it.
TestSuites = typing.Dict[str, typing.List[str]]
543 | |
def get_all_tests(options: Options) -> TestSuites:
    """
    Find all the test in the test directory and return the test suites.
    """
    test_suites = {}
    for root, dirs, files in os.walk(options.test_dir, topdown=True):
        # Prune excluded directories in place so os.walk() never descends
        # into them (requires topdown=True).
        dirs[:] = [d for d in dirs if not exclude_dir(d)]
        assert root == os.path.normpath(root)
        test_suites[root] = [f for f in files if not exclude_file(f)]
    return test_suites
558 | |
559 | |
def resolve_listed_tests(
    tests: typing.List[str], options: Options
) -> TestSuites:
    """
    Resolve the list of tests passed on the command line into their
    respective test suites. Tests can either be paths, or test names
    relative to the test directory.
    """
    test_suites = {}
    for test in tests:
        # Try the argument as a path first; fall back to interpreting it as
        # a name relative to the test directory.
        if not os.path.exists(test):
            test = os.path.join(options.test_dir, test)
            if not os.path.exists(test):
                raise RuntimeError(f"Test {test} does not exist!")

        test = os.path.normpath(os.path.abspath(test))
        assert test.startswith(options.test_dir)
        suite_dir, case_name = os.path.split(test)
        test_suites.setdefault(suite_dir, []).append(case_name)

    return test_suites
582 | |
def run_tests(test_suites: TestSuites, options: Options) -> bool:
    """
    Runs all the test in the :test_suites: with the given :options:.
    Prints the results to stdout. Returns True iff every test passed.
    """
    results = {}
    for suite_dir, suite_files in test_suites.items():
        with TestSuite(suite_dir, options) as suite:
            # De-duplicate and run in sorted order for determinism.
            for filename in sorted(set(suite_files)):
                with suite.test_case(filename) as case:
                    results[case.name] = case.run()

    failures = [name for name, passed in results.items() if not passed]
    for name in failures:
        print(f"FAIL: {name}")
    if not failures:
        print(f"PASSED all {len(results)} tests!")
        return True
    print(f"FAILED {len(failures)} / {len(results)} tests!")
    return False
608 | |
609 | |
def setup_zstd_symlink_dir(zstd_symlink_dir: str, zstd: str) -> None:
    """
    Populate :zstd_symlink_dir: with one symlink per name in ZSTD_SYMLINKS,
    each pointing at the :zstd: binary, replacing any links left over from a
    previous run.
    """
    assert os.path.join("bin", "symlinks") in zstd_symlink_dir
    if not os.path.exists(zstd_symlink_dir):
        os.makedirs(zstd_symlink_dir)
    for symlink in ZSTD_SYMLINKS:
        path = os.path.join(zstd_symlink_dir, symlink)
        # Use lexists(), not exists(): exists() follows the link, so a
        # dangling symlink (e.g. the zstd binary was rebuilt elsewhere or
        # removed) would be missed and os.symlink() below would raise
        # FileExistsError.
        if os.path.lexists(path):
            os.remove(path)
        os.symlink(zstd, path)
619 | |
if __name__ == "__main__":
    # Repository layout is derived relative to this script:
    # presumably REPO_DIR/tests/cli-tests/run.py — TODO confirm.
    CLI_TEST_DIR = os.path.dirname(sys.argv[0])
    REPO_DIR = os.path.join(CLI_TEST_DIR, "..", "..")
    PROGRAMS_DIR = os.path.join(REPO_DIR, "programs")
    TESTS_DIR = os.path.join(REPO_DIR, "tests")
    ZSTD_PATH = os.path.join(PROGRAMS_DIR, "zstd")
    ZSTDGREP_PATH = os.path.join(PROGRAMS_DIR, "zstdgrep")
    ZSTDLESS_PATH = os.path.join(PROGRAMS_DIR, "zstdless")
    DATAGEN_PATH = os.path.join(TESTS_DIR, "datagen")

    # Pass the help text as description=; previously it was passed
    # positionally, which ArgumentParser interprets as `prog` and which
    # corrupted the usage/help output.
    parser = argparse.ArgumentParser(
        description=(
            "Runs the zstd CLI tests. Exits nonzero on failure. Default arguments are\n"
            "generally correct. Pass --preserve to preserve test output for debugging,\n"
            "and --verbose to get verbose test output.\n"
        )
    )
    parser.add_argument(
        "--preserve",
        action="store_true",
        help="Preserve the scratch directory TEST_DIR/scratch/ for debugging purposes."
    )
    parser.add_argument("--verbose", action="store_true", help="Verbose test output.")
    parser.add_argument("--timeout", default=200, type=int, help="Test case timeout in seconds. Set to 0 to disable timeouts.")
    parser.add_argument(
        "--exec-prefix",
        default=None,
        help="Sets the EXEC_PREFIX environment variable. Prefix to invocations of the zstd CLI."
    )
    parser.add_argument(
        "--zstd",
        default=ZSTD_PATH,
        help="Sets the ZSTD_BIN environment variable. Path of the zstd CLI."
    )
    parser.add_argument(
        "--zstdgrep",
        default=ZSTDGREP_PATH,
        help="Sets the ZSTDGREP_BIN environment variable. Path of the zstdgrep CLI."
    )
    parser.add_argument(
        "--zstdless",
        default=ZSTDLESS_PATH,
        help="Sets the ZSTDLESS_BIN environment variable. Path of the zstdless CLI."
    )
    parser.add_argument(
        "--datagen",
        default=DATAGEN_PATH,
        help="Sets the DATAGEN_BIN environment variable. Path to the datagen CLI."
    )
    parser.add_argument(
        "--test-dir",
        default=CLI_TEST_DIR,
        help=(
            "Runs the tests under this directory. "
            "Adds TEST_DIR/bin/ to path. "
            "Scratch directory located in TEST_DIR/scratch/."
        )
    )
    parser.add_argument(
        "--set-exact-output",
        action="store_true",
        help="Set stderr.exact and stdout.exact for all failing tests, unless .ignore or .glob already exists"
    )
    parser.add_argument(
        "tests",
        nargs="*",
        help="Run only these test cases. Can either be paths or test names relative to TEST_DIR/"
    )
    args = parser.parse_args()

    # A timeout of 0 (or below) disables the per-test timeout entirely.
    if args.timeout <= 0:
        args.timeout = None

    args.test_dir = os.path.normpath(os.path.abspath(args.test_dir))
    bin_dir = os.path.abspath(os.path.join(args.test_dir, "bin"))
    zstd_symlink_dir = os.path.join(bin_dir, "symlinks")
    scratch_dir = os.path.join(args.test_dir, "scratch")

    setup_zstd_symlink_dir(zstd_symlink_dir, os.path.abspath(args.zstd))

    # Environment overrides handed to every test subprocess and
    # setup/teardown script.
    env = {}
    if args.exec_prefix is not None:
        env["EXEC_PREFIX"] = args.exec_prefix
    env["ZSTD_SYMLINK_DIR"] = zstd_symlink_dir
    env["ZSTD_REPO_DIR"] = os.path.abspath(REPO_DIR)
    env["DATAGEN_BIN"] = os.path.abspath(args.datagen)
    env["ZSTDGREP_BIN"] = os.path.abspath(args.zstdgrep)
    env["ZSTDLESS_BIN"] = os.path.abspath(args.zstdless)
    env["COMMON"] = os.path.abspath(os.path.join(args.test_dir, "common"))
    env["PATH"] = bin_dir + ":" + os.getenv("PATH", "")
    # Force the C locale so tool output is byte-stable across machines.
    env["LC_ALL"] = "C"

    opts = Options(
        env=env,
        timeout=args.timeout,
        verbose=args.verbose,
        preserve=args.preserve,
        test_dir=args.test_dir,
        scratch_dir=scratch_dir,
        set_exact_output=args.set_exact_output,
    )

    # No test names on the command line means "discover and run everything".
    if len(args.tests) == 0:
        tests = get_all_tests(opts)
    else:
        tests = resolve_listed_tests(args.tests, opts)

    success = run_tests(tests, opts)
    if success:
        sys.exit(0)
    else:
        sys.exit(1)