diff --git a/tests/functional/botocore/test_h2_required.py b/tests/functional/botocore/test_h2_required.py
index 97aa73b58bd5..c0673d8116a7 100644
--- a/tests/functional/botocore/test_h2_required.py
+++ b/tests/functional/botocore/test_h2_required.py
@@ -19,8 +19,8 @@
     'qbusiness': ['Chat'],
     'kinesis': ['SubscribeToShard'],
     'lexv2-runtime': ['StartConversation'],
-    # Added only to keep a record of this feature being incompatible
-    'bedrock-runtime': ['InvokeModelWithBidirectionalStream'],
+    # Added only to keep a record of this feature being incompatible
+    'bedrock-runtime': ['InvokeModelWithBidirectionalStream'],
 }
 
 
@@ -53,16 +53,21 @@ def _all_test_cases():
 
 @pytest.mark.validates_models
 @pytest.mark.parametrize("h2_service", H2_SERVICES)
-def test_all_uses_of_h2_are_known(h2_service):
+def test_all_uses_of_h2_are_known(h2_service, record_property):
     # Validates that a service that requires HTTP 2 for all operations is known
     message = f'Found unknown HTTP 2 service: {h2_service}'
+    # Store the service name in a PyTest custom property
+    record_property('aws_service', h2_service)
     assert _KNOWN_SERVICES.get(h2_service) is _H2_REQUIRED, message
 
 
 @pytest.mark.validates_models
 @pytest.mark.parametrize("h2_service, operation", H2_OPERATIONS)
-def test_all_h2_operations_are_known(h2_service, operation):
+def test_all_h2_operations_are_known(h2_service, operation, record_property):
     # Validates that an operation that requires HTTP 2 is known
     known_operations = _KNOWN_SERVICES.get(h2_service, [])
     message = f'Found unknown HTTP 2 operation: {h2_service}.{operation}'
+    # Store the service name and operation in PyTest custom properties
+    record_property('aws_service', h2_service)
+    record_property('aws_operation', operation)
     assert operation in known_operations, message
diff --git a/tests/functional/botocore/test_paginator_config.py b/tests/functional/botocore/test_paginator_config.py
index 45af7578ec7a..e77371ae31e3 100644
--- a/tests/functional/botocore/test_paginator_config.py
+++ b/tests/functional/botocore/test_paginator_config.py
@@ -149,14 +149,23 @@ def _pagination_configs():
 @pytest.mark.parametrize(
     "operation_name, page_config, service_model", _pagination_configs()
 )
-def test_lint_pagination_configs(operation_name, page_config, service_model):
+def test_lint_pagination_configs(
+    operation_name, page_config, service_model, record_property
+):
+    # Store common details of the operation
+    record_property('aws_service', service_model.service_name)
+    record_property('aws_operation', operation_name)
     _validate_known_pagination_keys(page_config)
     _validate_result_key_exists(page_config)
     _validate_referenced_operation_exists(operation_name, service_model)
-    _validate_operation_has_output(operation_name, service_model)
+    _validate_operation_has_output(
+        operation_name, service_model, record_property
+    )
     _validate_input_keys_match(operation_name, page_config, service_model)
     _validate_output_keys_match(operation_name, page_config, service_model)
-    _validate_new_numeric_keys(operation_name, page_config, service_model)
+    _validate_new_numeric_keys(
+        operation_name, page_config, service_model, record_property
+    )
 
 
 def _validate_known_pagination_keys(page_config):
@@ -183,10 +192,14 @@ def _validate_referenced_operation_exists(operation_name, service_model):
         )
 
 
-def _validate_operation_has_output(operation_name, service_model):
+def _validate_operation_has_output(
+    operation_name, service_model, record_property
+):
     op_model = service_model.operation_model(operation_name)
     output = op_model.output_shape
     if output is None or not output.members:
+        if output:
+            record_property('shape', output.type_name)
         raise AssertionError(
             "Pagination config refers to operation "
             f"that does not have any output: {operation_name}"
@@ -210,13 +223,9 @@ def _validate_input_keys_match(operation_name, page_config, service_model):
         limit_key = page_config['limit_key']
         if limit_key not in valid_input_names:
             raise AssertionError(
-                "limit_key '{}' refers to a non existent "
-                "input member for operation: {}, valid keys: "
-                "{}".format(
-                    limit_key,
-                    operation_name,
-                    ', '.join(list(valid_input_names)),
-                )
+                f"limit_key '{limit_key}' refers to a non existent "
+                f"input member for operation: {operation_name}, valid keys: "
+                f"{', '.join(list(valid_input_names))}."
             )
 
 
@@ -236,7 +245,8 @@ def _validate_output_keys_match(operation_name, page_config, service_model):
         else:
             if output_key not in output_members:
                 raise AssertionError(
-                    f"Pagination key '{key_name}' refers to an output "
+                    f"Pagination key '{key_name}' for operation "
+                    f"{operation_name} refers to an output "
                     f"member that does not exist: {output_key}"
                 )
             output_members.remove(output_key)
@@ -253,16 +263,15 @@ def _validate_output_keys_match(operation_name, page_config, service_model):
                 f.write(f"'{key}',\n")
         raise AssertionError(
             "There are member names in the output shape of "
-            "{} that are not accounted for in the pagination "
-            "config for service {}: {}".format(
-                operation_name,
-                service_model.service_name,
-                ', '.join(output_members),
-            )
+            f"{operation_name} that are not accounted for in the pagination "
+            f"config for service {service_model.service_name}: "
+            f"{', '.join(output_members)}"
         )
 
 
-def _validate_new_numeric_keys(operation_name, page_config, service_model):
+def _validate_new_numeric_keys(
+    operation_name, page_config, service_model, record_property
+):
     output_shape = service_model.operation_model(operation_name).output_shape
     for key in _get_list_value(page_config, 'result_key'):
         current_shape = output_shape
@@ -277,6 +286,7 @@ def _validate_new_numeric_keys(operation_name, page_config, service_model):
             and (service_model.service_name, operation_name)
             not in KNOWN_PAGINATORS_WITH_INTEGER_OUTPUTS
         ):
+            record_property('shape', current_shape.name)
             raise AssertionError(
                 f'There is a new operation {operation_name} for service '
                 f'{service_model.service_name} that is configured to sum '
diff --git a/tests/functional/botocore/test_supported_protocols.py b/tests/functional/botocore/test_supported_protocols.py
index f14bb9a2ec97..901202e47a88 100644
--- a/tests/functional/botocore/test_supported_protocols.py
+++ b/tests/functional/botocore/test_supported_protocols.py
@@ -51,9 +51,11 @@ def _single_protocol_test_cases():
     _multi_protocol_test_cases(),
 )
 def test_services_with_protocols_trait_have_supported_protocol(
-    service_name, supported_protocols
+    service_name, supported_protocols, record_property
 ):
     message = f"No protocols supported for service {service_name}"
+    # Store the service name in PyTest custom properties
+    record_property('aws_service', service_name)
     assert any(
         protocol in PRIORITY_ORDERED_SUPPORTED_PROTOCOLS
         for protocol in supported_protocols
@@ -66,7 +68,9 @@ def _single_protocol_test_cases():
     _single_protocol_test_cases(),
 )
 def test_services_without_protocols_trait_have_supported_protocol(
-    service_name, supported_protocol
+    service_name, supported_protocol, record_property
 ):
     message = f"Service protocol not supported for {service_name}"
+    # Store the service name in PyTest custom properties
+    record_property('aws_service', service_name)
     assert supported_protocol in PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, message
diff --git a/tests/functional/test_no_event_streams.py b/tests/functional/test_no_event_streams.py
index fd3fbcb457cc..72ae95ff8976 100644
--- a/tests/functional/test_no_event_streams.py
+++ b/tests/functional/test_no_event_streams.py
@@ -20,7 +20,7 @@
 
 
 @pytest.mark.validates_models
-def test_no_event_stream_unless_allowed():
+def test_no_event_stream_unless_allowed(record_property):
     driver = create_clidriver()
     help_command = driver.create_help_command()
     errors = []
@@ -31,18 +31,24 @@ def test_no_event_stream_unless_allowed():
                 op_help = sub_command.create_help_command()
                 model = op_help.obj
                 if isinstance(model, OperationModel):
-                    full_command = '%s %s' % (command_name, sub_name)
+                    full_command = f'{command_name} {sub_name}'
                     if (
                         model.has_event_stream_input
                         or model.has_event_stream_output
                     ):
                         if full_command in _ALLOWED_COMMANDS:
                             continue
+                        # Store the service and operation in
+                        # PyTest custom properties
+                        record_property(
+                            'aws_service', model.service_model.service_id
+                        )
+                        record_property('aws_operation', model.name)
                         supported_commands = '\n'.join(_ALLOWED_COMMANDS)
                         errors.append(
-                            'The "%s" command uses event streams '
+                            f'The {full_command} command uses event streams '
                             'which is only supported for these operations:\n'
-                            '%s' % (full_command, supported_commands)
+                            f'{supported_commands}'
                         )
     if errors:
         raise AssertionError('\n' + '\n'.join(errors))
diff --git a/tests/functional/test_shadowing.py b/tests/functional/test_shadowing.py
index 34060573226b..4ae8c89ba069 100644
--- a/tests/functional/test_shadowing.py
+++ b/tests/functional/test_shadowing.py
@@ -11,6 +11,7 @@
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
 import pytest
+from botocore.model import OperationModel
 
 from awscli.clidriver import create_clidriver
 
@@ -29,7 +30,9 @@ def _generate_command_tests():
 @pytest.mark.parametrize(
     "command_name, command_table, builtins", _generate_command_tests()
 )
-def test_no_shadowed_builtins(command_name, command_table, builtins):
+def test_no_shadowed_builtins(
+    command_name, command_table, builtins, record_property
+):
     """Verify no command params are shadowed or prefixed by the built in param.
 
     The CLI parses all command line options into a single namespace.
@@ -58,13 +61,21 @@ def test_no_shadowed_builtins(command_name, command_table, builtins):
     errors = []
     for sub_name, sub_command in command_table.items():
         op_help = sub_command.create_help_command()
+        model = op_help.obj
         arg_table = op_help.arg_table
         for arg_name in arg_table:
             if any(p.startswith(arg_name) for p in builtins):
+                if isinstance(model, OperationModel):
+                    # Store the service and operation in
+                    # PyTest custom properties
+                    record_property(
+                        'aws_service', model.service_model.service_id
+                    )
+                    record_property('aws_operation', model.name)
                 # Then we are shadowing or prefixing a top level argument
                 errors.append(
                     'Shadowing/Prefixing a top level option: '
-                    '%s.%s.%s' % (command_name, sub_name, arg_name)
+                    f'{command_name}.{sub_name}.{arg_name}'
                 )
     if errors:
         raise AssertionError('\n' + '\n'.join(errors))
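Note on the mechanism (not part of the patch): record_property is pytest's built-in per-test fixture for attaching key/value metadata to a test's report. When the suite is run with --junitxml, each recorded pair is emitted as a property element under that test case, which is how the aws_service, aws_operation, and shape values recorded above become filterable in CI results. A minimal sketch of that behavior; the test name and file names here are illustrative and not taken from this patch:

    # test_example.py -- illustrative only
    def test_records_metadata(record_property):
        # Each call attaches one name/value pair to this test's report.
        record_property('aws_service', 's3')
        record_property('aws_operation', 'ListBuckets')

    # Running "pytest test_example.py --junitxml=report.xml" then produces,
    # under the <testcase> entry for this test, roughly:
    #
    #   <properties>
    #     <property name="aws_service" value="s3" />
    #     <property name="aws_operation" value="ListBuckets" />
    #   </properties>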