 def _file_path(when):
     return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}')
 
-test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error)
+def _longrepr_from_str(msg):
+    trace_back = ReprTraceback([ReprEntryNative(msg)], None, None)
+    collect_longrepr = []
+    collect_longrepr.append(
+        (trace_back, None, None)
+    )
+    return ExceptionChainRepr(collect_longrepr)
 
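Note: the new `_longrepr_from_str` helper factors out the longrepr construction that the old code built inline. `ReprTraceback`, `ReprEntryNative` and `ExceptionChainRepr` come from pytest's internal `_pytest._code.code` module, so their location and signatures may change between pytest versions. A minimal, self-contained sketch (illustration only, not part of the diff) of how an object built this way renders:

```python
# Illustration only: rendering a longrepr built the same way as _longrepr_from_str.
# These classes are pytest internals; module path and signatures may vary by version.
from _pytest._code.code import ExceptionChainRepr, ReprEntryNative, ReprTraceback
from _pytest._io import TerminalWriter

def longrepr_from_str(msg):
    trace_back = ReprTraceback([ReprEntryNative(msg)], None, None)
    return ExceptionChainRepr([(trace_back, None, None)])

longrepr = longrepr_from_str('Error: the test crashed.\n')
longrepr.toterminal(TerminalWriter())  # writes the message the way pytest prints a failure block
```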
-# 'fatal_error' file
-file_path = _file_path('fatal_error')
-if file_path.exists():
-    with open(file_path, 'r', encoding='utf-8') as file:
-        fatal_error = file.read()
-    test_info['fatal_error'] = fatal_error
 
+def _fill_test_info_from_report(test_info, when):
+    assert when in ['setup', 'call', 'teardown']
 
-# 'setup/call/teardown' files
-already_failed = False
-for when in ('setup', 'call', 'teardown'):
     file_path = _file_path(when)
     if file_path.exists():
         try:
             with open(file_path, 'rb') as file:
                 report_info = file.read()
             report_info = pickle.loads(report_info)
             test_info[when] = report_info
+            failed = report_info['outcome'] == 'failed'
         except pickle.PickleError:
             test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}'
+            failed = True
     else: # Supposedly not found because the test crashed before writing the file
-        collect_longrepr = []
-        msg = 'Error: the test crashed. '
+        msg = f'Error: the test crashed during `{when}` phase. '
         red = 31
         bold = 1
         msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m'
         msg += f'Log file: {args._test_name}\n'
-        trace_back = ReprTraceback([ReprEntryNative(msg)], None, None)
-        collect_longrepr.append(
-            (trace_back, None, None)
-        )
-        longrepr = ExceptionChainRepr(collect_longrepr)
-
-        outcome = 'passed' if already_failed else 'failed' # No need to report the error twice
-        test_info[when] = {'outcome' : outcome,
+        longrepr = _longrepr_from_str(msg)
+
+        test_info[when] = {'outcome' : 'failed',
                            'longrepr': longrepr,
                            'duration': 0, } # unable to report accurately
 
-        already_failed = True
+        failed = True
+    return failed
+
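Note: `_fill_test_info_from_report` now reports whether the phase failed (or could not be decoded), so the caller can stop at the first failing phase instead of tracking `already_failed` across loop iterations. The reader expects each `<test_idx>_<when>` file to hold a pickled dict with `outcome`, `longrepr` and `duration` keys. A hypothetical writer-side counterpart, for illustration only (`write_partial_report` is not the plugin's API):

```python
# Hypothetical sketch of the writer side assumed by the reader above;
# the actual files are produced by the worker elsewhere in pytest_parallel.
import pickle
from pathlib import Path

def write_partial_report(file_path: Path, outcome: str, longrepr, duration: float) -> None:
    report_info = {'outcome': outcome, 'longrepr': longrepr, 'duration': duration}
    file_path.parent.mkdir(parents=True, exist_ok=True)
    with open(file_path, 'wb') as file:
        file.write(pickle.dumps(report_info))
```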
+def _retrieve_test_info():
+    test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absence means no error)
+    for when in ('setup', 'call', 'teardown'):
+        test_info[when] = {'outcome' : 'passed',
+                           'longrepr': _longrepr_from_str(''),
+                           'duration': 0, }
+
+    # During test execution, the following files are created in order:
+    #   1. before_import
+    #   2. collect
+    #   3. pre_run_error
+    #   4. setup
+    #   5. call
+    #   6. teardown
+    # If one of the files is missing, it means there was a crash (except for `pre_run_error`, where it is the other way around)
+
+    # 1. if `before_import` is not present, we crashed at the very beginning
+    if not _file_path('before_import').exists():
+        test_info['fatal_error'] = 'FATAL ERROR in pytest_parallel early processing\n'
+        test_info['fatal_error'] += f'Log file: {args._test_name}\n'
+        return test_info
+
+    # 2. handle collection
+    if not _file_path('collect').exists(): # if `collect` is not present, we crashed during the test collection
+        test_info['fatal_error'] = 'FATAL ERROR in pytest_parallel during test collection\n'
+        test_info['fatal_error'] += f'Log file: {args._test_name}\n'
+        return test_info
+    else: # else we report whether the collection failed
+        with open(_file_path('collect'), 'rb') as file:
+            report_info = file.read()
+        report_info = pickle.loads(report_info)
+        if report_info['outcome'] == 'failed':
+            # Note:
+            #   We could send report_info['longrepr'] to master so that it reports it directly.
+            #   However, that would be confusing, because master also ran the collection phase without error
+            #   (if there had been an error, the worker would not have run in the first place).
+            #   To make it clear that the error appears on the worker only, better refer to the worker's report.
+            msg = f'Error: the test crashed during `collect` phase. '
+            red = 31
+            bold = 1
+            msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m'
+            msg += f'Log file: {args._test_name}\n'
+            longrepr = _longrepr_from_str(msg)
+
+            # report as a setup failure (because indeed, the worker failed to set up the test by failing to collect it)
+            test_info['setup'] = {'outcome' : 'failed',
+                                  'longrepr': longrepr,
+                                  'duration': 0, } # unable to report accurately
+            return test_info
+
+    # 3. if `pre_run_error` is present, there was a fatal error in the pytest_parallel test handling
+    file_path = _file_path('pre_run_error')
+    if file_path.exists():
+        with open(file_path, 'r', encoding='utf-8') as file:
+            pre_run_error_msg = file.read()
+        test_info['fatal_error'] = pre_run_error_msg
+        return test_info
+
+    # 4., 5., 6.: the 'setup'/'call'/'teardown' files
+    for when in ('setup', 'call', 'teardown'):
+        failed = _fill_test_info_from_report(test_info, when)
+        if failed:
+            return test_info
+
+    return test_info
+
+
+
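Note: `_retrieve_test_info` starts from all-passed defaults and only overrides what the marker files contradict, returning early at the first fatal error or failed phase. For reference, on a clean run the returned dict has the shape sketched below (illustration only; the real `longrepr` entries are `ExceptionChainRepr` objects and `test_idx` comes from `args._test_idx`):

```python
# Illustration only: shape of the dict returned by _retrieve_test_info() on a clean run.
expected_shape = {
    'test_idx': 42,            # args._test_idx (value made up here)
    'fatal_error': None,
    'setup':    {'outcome': 'passed', 'longrepr': '<ExceptionChainRepr>', 'duration': 0},
    'call':     {'outcome': 'passed', 'longrepr': '<ExceptionChainRepr>', 'duration': 0},
    'teardown': {'outcome': 'passed', 'longrepr': '<ExceptionChainRepr>', 'duration': 0},
}
```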
 
+test_info = _retrieve_test_info()
 
 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
     s.connect((args._scheduler_ip_address, args._scheduler_port))
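Note: once `test_info` is assembled, the worker connects back to the scheduler; how the dict is framed and sent happens outside the lines shown here. For illustration only, a length-prefixed pickle is one simple way to ship such a payload (an assumption for the sketch, not pytest_parallel's actual wire format):

```python
# Assumed length-prefixed framing for illustration; the plugin's real
# send/receive helpers are defined elsewhere.
import pickle
import socket
import struct

def send_pickled(sock: socket.socket, obj) -> None:
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack('!Q', len(payload)))  # 8-byte big-endian length header
    sock.sendall(payload)
```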
|