|
|
|
import os |
|
import sys |
|
import argparse |
|
import copy |
|
from datetime import datetime |
|
import common_util |
|
import signal |
|
|
|
# Directory containing this script; test modules are resolved relative to it.
test_dir = os.path.dirname(os.path.realpath(__file__))

# Modules that make up the full test suite (executed in this order).
TESTS = ['test_config', 'test_read']

# Tests that are always forced through pytest (checked per-test in main()).
USE_PYTEST_LIST = []

# Optional per-test runner overrides: test name -> callable with the same
# signature as run_test. run_test_module falls back to run_test when a
# test has no entry here.
CUSTOM_HANDLERS = {}
|
|
|
|
|
def print_to_stderr(message):
    """Write *message* followed by a newline to the standard error stream."""
    sys.stderr.write('{}\n'.format(message))
|
|
|
|
|
def parse_test_module(test):
    """Return the module part of a test id ('test_config.TestCase' -> 'test_config')."""
    module_name, _, _ = test.partition('.')
    return module_name
|
|
|
|
|
class TestChoices(list):
    """List of valid test names whose membership test ignores any '.suffix'.

    Passed as argparse ``choices`` so a user may give a qualified name
    such as 'test_config.TestCase' and still validate against the bare
    module names stored in the list.
    """

    def __init__(self, *args, **kwargs):
        # Seed the list from the first positional argument; any other
        # arguments argparse might pass are accepted and ignored.
        super().__init__(args[0])

    def __contains__(self, item):
        # Compare only the module portion of the candidate name.
        return super().__contains__(parse_test_module(item))
|
|
|
|
|
def parse_args():
    """Parse command-line options for the test runner.

    Returns:
        argparse.Namespace with fields: pytest, include, exclude,
        continue_through_error, additional_unittest_args.
    """
    parser = argparse.ArgumentParser(
        description='Run the Petrel unit test suite',
        epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))

    parser.add_argument(
        '-pt',
        '--pytest',
        action='store_true',
        help='If true, use `pytest` to execute the tests. E.g., this runs '
        'python run_test.py -pt')

    # TestChoices validates only the module part, so qualified names like
    # 'test_config.TestCase' are accepted here.
    parser.add_argument(
        '-i',
        '--include',
        nargs='+',
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar='TESTS',
        help='select a set of tests to include (defaults to ALL tests).'
        ' tests are specified with module name')

    parser.add_argument('-x',
                        '--exclude',
                        nargs='+',
                        choices=TESTS,
                        metavar='TESTS',
                        default=[],
                        help='select a set of tests to exclude')

    parser.add_argument(
        '--continue-through-error',
        action='store_true',
        help='Runs the full test suite despite one of the tests failing')

    parser.add_argument(
        'additional_unittest_args',
        nargs='*',
        help='additional arguments passed through to unittest, e.g., '
        # BUG FIX: the original implicit concatenation was missing a space,
        # rendering the help as "... test_report.logto save ...".
        'python run_test.py -i test_config -- -s test_report.log '
        'to save test report in test_report.log')

    return parser.parse_args()
|
|
|
|
|
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    """Filter *selected_tests* in place, dropping entries that start with
    any prefix in *exclude_list*.

    Args:
        exclude_list: list of test-name prefixes to remove.
        selected_tests: list of test names; mutated in place.
        exclude_message: optional reason printed to stderr per exclusion.

    Returns:
        The same *selected_tests* list object, now filtered.
    """
    for exclude_test in exclude_list:
        # Single pass per prefix; the original copied the list and called
        # list.remove() per hit, which was accidentally quadratic.
        kept = []
        for test in selected_tests:
            if test.startswith(exclude_test):
                if exclude_message is not None:
                    print_to_stderr('Excluding {} {}'.format(
                        test, exclude_message))
            else:
                kept.append(test)
        # Slice-assign to preserve the in-place mutation contract.
        selected_tests[:] = kept
    return selected_tests
|
|
|
|
|
def get_selected_tests(options):
    """Return the list of tests to run, honoring --include and --exclude.

    BUG FIX: copy options.include before filtering. It defaults to the
    module-level TESTS list itself, and exclude_tests removes entries in
    place, so the original silently shrank TESTS for the whole process.
    """
    selected_tests = list(options.include)
    return exclude_tests(options.exclude, selected_tests)
|
|
|
|
|
def get_executable_command(options, allow_pytest):
    """Build the interpreter command prefix for launching a test process.

    Returns [sys.executable] for plain unittest runs, or
    [sys.executable, '-m', 'pytest'] when pytest was requested and is
    allowed for this test; a disallowed pytest request falls back to
    unittest with a warning on stderr.
    """
    executable = [sys.executable]
    if not options.pytest:
        return executable

    if allow_pytest:
        executable.extend(['-m', 'pytest'])
    else:
        print_to_stderr(
            'Pytest cannot be used for this test. Falling back to unittest.'
        )
    return executable
|
|
|
|
|
def run_test(test_module,
             test_directory,
             options,
             launcher_cmd=None,
             extra_unittest_args=None):
    """Launch one test module in a subprocess and return its exit code.

    Args:
        test_module: module name, e.g. 'test_config' (no '.py' suffix).
        test_directory: working directory for the subprocess.
        options: parsed argparse namespace for this run.
        launcher_cmd: optional command list prepended to the invocation.
        extra_unittest_args: optional extra unittest arguments; when
            present, pytest is disallowed for this run.

    Returns:
        The subprocess return code from common_util.shell.
    """
    unittest_args = list(options.additional_unittest_args)

    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        unittest_args += extra_unittest_args

    if options.pytest:
        # unittest spells fail-fast '-f'; pytest spells it '-x'.
        unittest_args = ['-x' if arg == '-f' else arg for arg in unittest_args]

    print(test_module)
    argv = [test_module + '.py'] + unittest_args

    # Pytest is only permitted when no handler-specific args were injected.
    executable = get_executable_command(options,
                                        allow_pytest=not extra_unittest_args)

    command = (launcher_cmd or []) + executable + argv
    print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
    return common_util.shell(command, test_directory)
|
|
|
|
|
|
|
# Map signal numbers to their 'SIG*' names (e.g. 2 -> 'SIGINT').
# Names containing '_' (SIG_DFL, SIG_IGN, SIGRTMIN-style aliases) are
# skipped because they are not plain signal names. dir() yields names in
# sorted order, so when two names share a number (e.g. SIGCHLD/SIGCLD)
# the alphabetically later one wins — same as the original comprehension.
SIGNALS_TO_NAMES_DICT = {}
for _signal_name in dir(signal):
    if _signal_name.startswith('SIG') and '_' not in _signal_name:
        SIGNALS_TO_NAMES_DICT[getattr(signal, _signal_name)] = _signal_name
|
|
|
|
|
def run_test_module(test, test_directory, options):
    """Run a single test entry and report failure as a message string.

    Args:
        test: test identifier, e.g. 'test_config' or 'test_config.TestCase'.
        test_directory: directory the test subprocess runs in.
        options: parsed argparse namespace for this particular test.

    Returns:
        None when the test passed (return code 0), otherwise a
        human-readable failure message.
    """
    test_module = parse_test_module(test)

    print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
    handler = CUSTOM_HANDLERS.get(test, run_test)
    return_code = handler(test_module, test_directory, options)
    assert isinstance(return_code, int) and not isinstance(
        return_code, bool), 'Return code should be an integer'

    if return_code == 0:
        return None

    # BUG FIX: the original assigned the literal string '{test} failed!'
    # without calling .format, so callers saw the raw placeholder.
    message = '{} failed!'.format(test)
    if return_code < 0:
        # A negative return code means the subprocess died from a signal.
        # .get guards against signal numbers missing from the table
        # (the original raised KeyError and also debug-printed the name).
        signal_name = SIGNALS_TO_NAMES_DICT.get(
            -return_code, 'signal {}'.format(-return_code))
        message += ' Received signal: ' + signal_name
    return message
|
|
|
|
|
def main():
    """Entry point: run every selected test module, aggregating failures.

    Without --continue-through-error the first failure raises
    RuntimeError; with it, all failures are reported at the end and the
    process exits with status 1.
    """
    options = parse_args()
    test_directory = os.path.dirname(os.path.abspath(__file__))

    failure_messages = []
    has_failed = False
    for test in get_selected_tests(options):
        # Deep-copy so per-test tweaks never leak into later iterations.
        per_test_options = copy.deepcopy(options)
        if test in USE_PYTEST_LIST:
            per_test_options.pytest = True
        err_message = run_test_module(test, test_directory, per_test_options)
        if err_message is None:
            continue
        has_failed = True
        failure_messages.append(err_message)
        if not per_test_options.continue_through_error:
            raise RuntimeError(err_message)
        print_to_stderr(err_message)

    # Summarize every failure before signalling an unsuccessful run.
    if has_failed and options.continue_through_error:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
|
|
|
|
|
# Allow this module to be executed directly as a script.
if __name__ == '__main__':

    main()
|
|