Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PR #1410/3d4736bb backport][stable-3] Minor sanity test fixes (new devel) #1498

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions changelogs/fragments/1410-linting.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
minor_changes:
- autoscaling_group_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- cloudfront_distribution - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- cloudfront_origin_access_identity - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- cloudtrail - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- ec2_vpc_nacl - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- ec2_asg_lifecycle_hook - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- redshift - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
- s3_bucket_info - minor sanity test fixes (https://github.com/ansible-collections/community.aws/pull/1410).
9 changes: 4 additions & 5 deletions plugins/connection/aws_ssm.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,12 +204,11 @@
try:
import boto3
from botocore.client import Config
HAS_BOTO_3 = True
except ImportError as e:
HAS_BOTO_3_ERROR = str(e)
HAS_BOTO_3 = False
pass

from functools import wraps
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six.moves import xrange
Expand Down Expand Up @@ -290,8 +289,8 @@ class Connection(ConnectionBase):
MARK_LENGTH = 26

def __init__(self, *args, **kwargs):
if not HAS_BOTO_3:
raise AnsibleError('{0}: {1}'.format(missing_required_lib("boto3"), HAS_BOTO_3_ERROR))
if not HAS_BOTO3:
raise AnsibleError('{0}'.format(missing_required_lib("boto3")))

super(Connection, self).__init__(*args, **kwargs)
self.host = self._play_context.remote_addr
Expand Down
29 changes: 10 additions & 19 deletions plugins/modules/aws_s3_bucket_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,7 @@ def get_bucket_list(module, connection, name="", name_filter=""):
final_buckets = filtered_buckets
else:
final_buckets = buckets
return(final_buckets)
return final_buckets


def get_buckets_facts(connection, buckets, requested_facts, transform_location):
Expand All @@ -454,7 +454,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location):
bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location))
full_bucket_list.append(bucket)

return(full_bucket_list)
return full_bucket_list


def get_bucket_details(connection, name, requested_facts, transform_location):
Expand Down Expand Up @@ -487,7 +487,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location):
except botocore.exceptions.ClientError:
pass

return(all_facts)
return all_facts


@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
Expand All @@ -505,11 +505,8 @@ def get_bucket_location(name, connection, transform_location=False):
except KeyError:
pass
# Strip response metadata (not needed)
try:
data.pop('ResponseMetadata')
return(data)
except KeyError:
return(data)
data.pop('ResponseMetadata', None)
return data


@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
Expand All @@ -521,14 +518,11 @@ def get_bucket_tagging(name, connection):

try:
bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet'])
return(bucket_tags)
return bucket_tags
except KeyError:
# Strip response metadata (not needed)
try:
data.pop('ResponseMetadata')
return(data)
except KeyError:
return(data)
data.pop('ResponseMetadata', None)
return data


@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
Expand All @@ -541,11 +535,8 @@ def get_bucket_property(name, connection, get_api_name):
data = api_function(Bucket=name)

# Strip response metadata (not needed)
try:
data.pop('ResponseMetadata')
return(data)
except KeyError:
return(data)
data.pop('ResponseMetadata', None)
return data


def main():
Expand Down
12 changes: 6 additions & 6 deletions plugins/modules/cloudfront_distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -1689,7 +1689,7 @@ def validate_origins(self, client, config, origins, default_origin_domain_name,
if purge_origins:
for domain in list(all_origins.keys()):
if domain not in new_domains:
del(all_origins[domain])
del all_origins[domain]
return ansible_list_to_cloudfront_list(list(all_origins.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution origins")
Expand All @@ -1707,7 +1707,7 @@ def validate_s3_origin_configuration(self, client, existing_config, origin):
cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
Comment=comment))
oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
except Exception as e:
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
return "origin-access-identity/cloudfront/%s" % oai

Expand All @@ -1730,7 +1730,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path):
else:
s3_origin_config = None

del(origin["s3_origin_access_identity_enabled"])
del origin["s3_origin_access_identity_enabled"]

if s3_origin_config:
oai = s3_origin_config
Expand Down Expand Up @@ -1779,7 +1779,7 @@ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge
all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
if purge_cache_behaviors:
for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
del(all_cache_behaviors[target_origin_id])
del all_cache_behaviors[target_origin_id]
return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
Expand Down Expand Up @@ -1967,7 +1967,7 @@ def validate_custom_error_responses(self, config, custom_error_responses, purge_
if 'response_code' in custom_error_response:
custom_error_response['response_code'] = str(custom_error_response['response_code'])
if custom_error_response['error_code'] in existing_responses:
del(existing_responses[custom_error_response['error_code']])
del existing_responses[custom_error_response['error_code']]
result.append(custom_error_response)
if not purge_custom_error_responses:
result.extend(existing_responses.values())
Expand Down Expand Up @@ -2274,7 +2274,7 @@ def main():

if 'distribution_config' in result:
result.update(result['distribution_config'])
del(result['distribution_config'])
del result['distribution_config']

module.exit_json(changed=changed, **result)

Expand Down
3 changes: 1 addition & 2 deletions plugins/modules/cloudfront_origin_access_identity.py
Original file line number Diff line number Diff line change
Expand Up @@ -256,8 +256,7 @@ def main():
else:
result = service_mgr.create_origin_access_identity(caller_reference, comment)
changed = True
elif(state == 'absent' and origin_access_identity_id is not None and
e_tag is not None):
elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
changed = True

Expand Down
6 changes: 2 additions & 4 deletions plugins/modules/cloudtrail.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ def get_kms_key_aliases(module, client, keyId):
"""
try:
key_resp = client.list_aliases(KeyId=keyId)
except (BotoCoreError, ClientError) as err:
except (BotoCoreError, ClientError):
# Don't fail here, just return [] to maintain backwards compat
# in case user doesn't have kms:ListAliases permissions
return []
Expand Down Expand Up @@ -568,9 +568,7 @@ def main():
# all aliases for a match.
initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
for a in initial_aliases:
if(a['AliasName'] == new_key or
a['AliasArn'] == new_key or
a['TargetKeyId'] == new_key):
if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key:
results['changed'] = False

# Check if we need to start/stop logging
Expand Down
4 changes: 2 additions & 2 deletions plugins/modules/ec2_asg_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ def find_asgs(conn, module, name=None, tags=None):

try:
elbv2 = module.client('elbv2')
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
# This is nice to have, not essential
elbv2 = None
matched_asgs = []
Expand Down Expand Up @@ -407,7 +407,7 @@ def find_asgs(conn, module, name=None, tags=None):
# workaround for https://github.com/ansible/ansible/pull/25015
if 'target_group_ar_ns' in asg:
asg['target_group_arns'] = asg['target_group_ar_ns']
del(asg['target_group_ar_ns'])
del asg['target_group_ar_ns']
if asg.get('target_group_arns'):
if elbv2:
try:
Expand Down
6 changes: 3 additions & 3 deletions plugins/modules/ec2_asg_lifecycle_hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def create_lifecycle_hook(connection, module):
# GlobalTimeout is not configurable, but exists in response.
# Removing it helps to compare both dicts in order to understand
# what changes were done.
del(existing_hook[0]['GlobalTimeout'])
del existing_hook[0]['GlobalTimeout']
added, removed, modified, same = dict_compare(lch_params, existing_hook[0])
if added or removed or modified:
changed = True
Expand All @@ -166,7 +166,7 @@ def create_lifecycle_hook(connection, module):
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create LifecycleHook")

return(changed)
return changed


def dict_compare(d1, d2):
Expand Down Expand Up @@ -213,7 +213,7 @@ def delete_lifecycle_hook(connection, module):
else:
pass

return(changed)
return changed


def main():
Expand Down
4 changes: 2 additions & 2 deletions plugins/modules/ec2_vpc_nacl.py
Original file line number Diff line number Diff line change
Expand Up @@ -347,14 +347,14 @@ def setup_network_acl(client, module):
replace_network_acl_association(nacl_id, subnets, client, module)
construct_acl_entries(nacl, client, module)
changed = True
return(changed, nacl['NetworkAcl']['NetworkAclId'])
return changed, nacl['NetworkAcl']['NetworkAclId']
else:
changed = False
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
changed |= subnets_changed(nacl, client, module)
changed |= nacls_changed(nacl, client, module)
changed |= tags_changed(nacl_id, client, module)
return (changed, nacl_id)
return changed, nacl_id


def remove_network_acl(client, module):
Expand Down
13 changes: 5 additions & 8 deletions plugins/modules/redshift.py
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,7 @@ def create_cluster(module, redshift):
changed = True
resource = _describe_cluster(redshift, identifier)

return(changed, _collect_facts(resource))
return changed, _collect_facts(resource)


def describe_cluster(module, redshift):
Expand All @@ -479,7 +479,7 @@ def describe_cluster(module, redshift):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error describing cluster")

return(True, _collect_facts(resource))
return True, _collect_facts(resource)


def delete_cluster(module, redshift):
Expand Down Expand Up @@ -508,7 +508,7 @@ def delete_cluster(module, redshift):
ClusterIdentifier=identifier,
**snake_dict_to_camel_dict(params, capitalize_first=True))
except is_boto3_error_code('ClusterNotFound'):
return(False, {})
return False, {}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete cluster")

Expand All @@ -523,7 +523,7 @@ def delete_cluster(module, redshift):
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout deleting the cluster")

return(True, {})
return True, {}


def modify_cluster(module, redshift):
Expand All @@ -537,9 +537,6 @@ def modify_cluster(module, redshift):
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
tags = module.params.get('tags')
purge_tags = module.params.get('purge_tags')
region = region = module.params.get('region')

# Package up the optional parameters
params = {}
Expand Down Expand Up @@ -603,7 +600,7 @@ def modify_cluster(module, redshift):
if _ensure_tags(redshift, identifier, resource['Tags'], module):
resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]

return(True, _collect_facts(resource))
return True, _collect_facts(resource)


def main():
Expand Down
2 changes: 1 addition & 1 deletion scripts/inventory/ec2.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ def read_settings(self):
if six.PY3:
config = configparser.ConfigParser(DEFAULTS)
else:
config = configparser.SafeConfigParser(DEFAULTS)
config = configparser.SafeConfigParser(DEFAULTS) # pylint: disable=deprecated-class
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))

Expand Down
6 changes: 3 additions & 3 deletions tests/unit/plugins/connection/test_aws_ssm.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def test_plugins_connection_aws_ssm_wrap_command(self):
conn = connection_loader.get('community.aws.aws_ssm', pc, new_stdin)
conn.is_windows = MagicMock()
conn.is_windows.return_value = True
return('windows1')
return 'windows1'

def test_plugins_connection_aws_ssm_post_process(self):
pc = PlayContext()
Expand All @@ -103,7 +103,7 @@ def test_plugins_connection_aws_ssm_post_process(self):
fail = 2
conn.stdout = MagicMock()
returncode = 0
return(returncode, conn.stdout)
return returncode, conn.stdout

@patch('subprocess.Popen')
def test_plugins_connection_aws_ssm_flush_stderr(self, s_popen):
Expand All @@ -114,7 +114,7 @@ def test_plugins_connection_aws_ssm_flush_stderr(self, s_popen):
conn.poll_stderr.register = MagicMock()
conn.stderr = None
s_popen.poll().return_value = 123
return(conn.stderr)
return conn.stderr

@patch('boto3.client')
def test_plugins_connection_aws_ssm_get_url(self, boto):
Expand Down
2 changes: 1 addition & 1 deletion tests/unit/plugins/modules/test_aws_acm.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,4 +122,4 @@ def test_chain_compare():
if expected != actual:
print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path']))
print("Expected %s got %s" % (str(expected), str(actual)))
assert(expected == actual)
assert expected == actual