From feca0eb80d0ed70ccb302ecf41fe5bee11edef72 Mon Sep 17 00:00:00 2001 From: Clark Schneider Date: Mon, 2 May 2022 20:26:08 +0000 Subject: [PATCH] Issue #3 edit variable names per PEP8 --- .gitignore | 1 + README.md | 14 +- stacks/.~c9_invoke_P8ajVj.py | 671 ------------------ stacks/client_stack.py | 3 +- .../lambda_function.py | 28 +- .../lambda_function.py | 30 +- .../lambdas/custom_config/lambda_function.py | 10 +- .../handle_infraction/lambda_function.py | 42 +- .../invoked_by_apigw/lambda_function.py | 28 +- .../python_subprocess/lambda_function.py | 68 +- .../lambdas/s3_head_object/lambda_function.py | 16 +- .../lambdas/s3_put_object/lambda_function.py | 8 +- .../lambdas/s3_select/lambda_function.py | 26 +- .../sign_apigw_request/lambda_function.py | 6 +- .../write_results_report/lambda_function.py | 44 +- 15 files changed, 157 insertions(+), 838 deletions(-) delete mode 100644 stacks/.~c9_invoke_P8ajVj.py diff --git a/.gitignore b/.gitignore index e5eda1b8..81d64348 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ __pycache__ .cdk.staging cdk.out .env +.~c9_* \ No newline at end of file diff --git a/README.md b/README.md index 60265c33..8b418717 100644 --- a/README.md +++ b/README.md @@ -99,16 +99,4 @@ After running `cdk deploy`, the Control Broker will be set up. ## Next Steps -Try launching one of the [Example use cases](./README.md#example-use-cases)! - -# Setup - -## Install CDK experimental modules - -pip install aws_cdk.aws_lambda_python_alpha - -pip install aws_cdk.aws_apigatewayv2_alpha - -pip install aws_cdk.aws_apigatewayv2_integrations_alpha - -pip install aws_cdk.aws_apigatewayv2_authorizers_alpha +Try launching one of the [Example use cases](./README.md#example-use-cases)! 
\ No newline at end of file diff --git a/stacks/.~c9_invoke_P8ajVj.py b/stacks/.~c9_invoke_P8ajVj.py deleted file mode 100644 index eee54fbb..00000000 --- a/stacks/.~c9_invoke_P8ajVj.py +++ /dev/null @@ -1,671 +0,0 @@ -import os -import json -from typing import List, Sequence - -from aws_cdk import ( - Duration, - Stack, - RemovalPolicy, - CfnOutput, - SecretValue, - aws_config, - aws_dynamodb, - aws_s3, - aws_s3_deployment, - aws_lambda, - aws_stepfunctions, - aws_iam, - aws_logs, - aws_events, -) -from constructs import Construct - -from utils.mixins import SecretConfigStackMixin - - -class ControlBrokerStack(Stack, SecretConfigStackMixin): - def __init__( - self, - scope: Construct, - construct_id: str, - **kwargs, - ) -> None: - """A full Control Broker installation. - - :param scope: - :type scope: Construct - :param construct_id: - :type construct_id: str - :param continously_deployed: Whether to launch the Control Broker via a CDK Pipeline and deploy on code changes, defaults to True - :type continously_deployed: bool, optional - :param github_repo_name: Required if continously_deployed is True - :type github_repo_name: str, optional - :param github_repo_owner: Required if continously_deployed is True - :type github_repo_owner: str, optional - :param github_repo_branch: Required if continously_deployed is True - :type github_repo_branch: str, optional - - :raises ValueError: When config_rule_enabled is True and config_rule_scope is None - :raises ValueError: When continously_deployed is True and any of the github variables is not set - """ - super().__init__(scope, construct_id, **kwargs) - - self.deploy_utils() - self.s3_deploy_local_assets() - self.deploy_inner_sfn_lambdas() - self.deploy_inner_sfn() - self.deploy_outer_sfn_lambdas() - self.deploy_outer_sfn() - - self.Input_reader_roles: List[aws_iam.Role] = [ - self.lambda_opa_eval_python_subprocess.role, - ] - - self.outer_eval_engine_state_machine = ( - 
aws_stepfunctions.StateMachine.from_state_machine_arn( - self, - "OuterEvalEngineStateMachineObj", - self.sfn_outer_eval_engine.attr_arn, - ) - ) - - self.eval_results_reports_bucket = aws_s3.Bucket.from_bucket_name( - self, - "EvalResultsReportsBucketObj", - self.bucket_eval_results_reports.bucket_name, - ) - - CfnOutput( - self, - "InputReaderArns", - value=json.dumps([r.role_arn for r in self.Input_reader_roles]), - ) - - def deploy_utils(self): - - # eval results - - self.table_eval_results = aws_dynamodb.Table( - self, - "EvalResults", - partition_key=aws_dynamodb.Attribute( - name="pk", type=aws_dynamodb.AttributeType.STRING - ), - sort_key=aws_dynamodb.Attribute( - name="sk", type=aws_dynamodb.AttributeType.STRING - ), - billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST, - removal_policy=RemovalPolicy.DESTROY, - ) - - # event bridge bus - - self.event_bus_infractions = aws_events.EventBus(self, "Infractions") - - # debug event bridge by logging events - - logs_infraction_events = aws_logs.LogGroup( - self, "InfractionEvents", removal_policy=RemovalPolicy.DESTROY - ) - logs_infraction_events.grant_write( - aws_iam.ServicePrincipal("events.amazonaws.com") - ) - - cfn_rule = aws_events.CfnRule( - self, - "ListenAllInfractions", - state="ENABLED", - event_bus_name=self.event_bus_infractions.event_bus_name, - event_pattern=aws_events.EventPattern(account=[self.account]), - targets=[ - aws_events.CfnRule.TargetProperty( - arn=logs_infraction_events.log_group_arn, id="InfractionEvents" - ) - ], - ) - - # results reports - - self.bucket_eval_results_reports = aws_s3.Bucket( - self, - "EvalResultsReports", - removal_policy=RemovalPolicy.DESTROY, - auto_delete_objects=True, - block_public_access=aws_s3.BlockPublicAccess( - block_public_acls=True, - ignore_public_acls=True, - block_public_policy=True, - restrict_public_buckets=True, - ), - ) - - self.bucket_eval_results_reports.add_to_resource_policy( - aws_iam.PolicyStatement( - principals=[ - 
aws_iam.AnyPrincipal().with_conditions( - { - "ForAnyValue:StringLike": { - "aws:PrincipalOrgPaths": [self.secrets.allowed_org_path] - } - } - ) - ], - actions=[ - "s3:GetObject", - ], - resources=[ - self.bucket_eval_results_reports.bucket_arn, - self.bucket_eval_results_reports.arn_for_objects("*"), - ], - ) - ) - - def s3_deploy_local_assets(self): - - # opa policies - - self.bucket_opa_policies = aws_s3.Bucket( - self, - "OpaPolicies", - block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL, - removal_policy=RemovalPolicy.DESTROY, - auto_delete_objects=True, - ) - - aws_s3_deployment.BucketDeployment( - self, - "OpaPoliciesByService", - sources=[ - aws_s3_deployment.Source.asset("./supplementary_files/opa-policies") - ], - destination_bucket=self.bucket_opa_policies, - retain_on_delete=False, - ) - - def deploy_inner_sfn_lambdas(self): - - # opa eval - python subprocess - single threaded - - self.lambda_opa_eval_python_subprocess = aws_lambda.Function( - self, - "OpaEvalPythonSubprocessSingleThreaded", - runtime=aws_lambda.Runtime.PYTHON_3_9, - handler="lambda_function.lambda_handler", - timeout=Duration.seconds(60), - memory_size=10240, # todo power-tune - code=aws_lambda.Code.from_asset( - "./supplementary_files/lambdas/opa_eval/python_subprocess" - ), - ) - - self.lambda_opa_eval_python_subprocess.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "s3:HeadObject", - "s3:GetObject", - "s3:List*", - ], - resources=[ - self.bucket_opa_policies.bucket_arn, - self.bucket_opa_policies.arn_for_objects("*"), - ], - ) - ) - - # gather infractions - - self.lambda_gather_infractions = aws_lambda.Function( - self, - "GatherInfractions", - runtime=aws_lambda.Runtime.PYTHON_3_9, - handler="lambda_function.lambda_handler", - timeout=Duration.seconds(60), - memory_size=1024, - code=aws_lambda.Code.from_asset( - "./supplementary_files/lambdas/gather_infractions" - ), - ) - - # handle infraction - - self.lambda_handle_infraction = aws_lambda.Function( - self, - 
"HandleInfraction", - runtime=aws_lambda.Runtime.PYTHON_3_9, - handler="lambda_function.lambda_handler", - timeout=Duration.seconds(60), - memory_size=1024, - code=aws_lambda.Code.from_asset( - "./supplementary_files/lambdas/handle_infraction" - ), - environment={ - "TableName": self.table_eval_results.table_name, - "EventBusName": self.event_bus_infractions.event_bus_name, - }, - ) - - self.lambda_handle_infraction.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "dynamodb:UpdateItem", - "dynamodb:Query", - ], - resources=[ - self.table_eval_results.table_arn, - f"{self.table_eval_results.table_arn}/*", - ], - ) - ) - - self.lambda_handle_infraction.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "events:PutEvents", - ], - resources=[ - self.event_bus_infractions.event_bus_arn, - f"{self.event_bus_infractions.event_bus_arn}*", - ], - ) - ) - - def deploy_inner_sfn(self): - - log_group_inner_eval_engine_sfn = aws_logs.LogGroup( - self, - "InnerEvalEngineSfnLogs", - log_group_name=f"/aws/vendedlogs/states/InnerEvalEngineSfnLogs-{self.stack_name}", - removal_policy=RemovalPolicy.DESTROY, - ) - - self.role_inner_eval_engine_sfn = aws_iam.Role( - self, - "InnerEvalEngineSfn", - assumed_by=aws_iam.ServicePrincipal("states.amazonaws.com"), - ) - - self.role_inner_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - # "logs:*", - "logs:CreateLogDelivery", - "logs:GetLogDelivery", - "logs:UpdateLogDelivery", - "logs:DeleteLogDelivery", - "logs:ListLogDeliveries", - "logs:PutResourcePolicy", - "logs:DescribeResourcePolicies", - "logs:DescribeLogGroups", - ], - resources=[ - "*", - log_group_inner_eval_engine_sfn.log_group_arn, - f"{log_group_inner_eval_engine_sfn.log_group_arn}*", - ], - ) - ) - self.role_inner_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=["lambda:InvokeFunction"], - resources=[ - self.lambda_opa_eval_python_subprocess.function_arn, - self.lambda_gather_infractions.function_arn, - 
self.lambda_handle_infraction.function_arn, - ], - ) - ) - self.role_inner_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "dynamodb:UpdateItem", - "dynamodb:Query", - ], - resources=[ - self.table_eval_results.table_arn, - f"{self.table_eval_results.table_arn}/*", - ], - ) - ) - self.role_inner_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "events:PutEvents", - ], - resources=[ - self.event_bus_infractions.event_bus_arn, - f"{self.event_bus_infractions.event_bus_arn}*", - ], - ) - ) - - self.sfn_inner_eval_engine = aws_stepfunctions.CfnStateMachine( - self, - "InnerEvalEngine", - state_machine_type="EXPRESS", - role_arn=self.role_inner_eval_engine_sfn.role_arn, - logging_configuration=aws_stepfunctions.CfnStateMachine.LoggingConfigurationProperty( - destinations=[ - aws_stepfunctions.CfnStateMachine.LogDestinationProperty( - cloud_watch_logs_log_group=aws_stepfunctions.CfnStateMachine.CloudWatchLogsLogGroupProperty( - log_group_arn=log_group_inner_eval_engine_sfn.log_group_arn - ) - ) - ], - # include_execution_data=False, - # level="ERROR", - include_execution_data=True, - level="ALL", - ), - definition_string=json.dumps( - { - "StartAt": "ParseInput", - "States": { - "ParseInput": { - "Type": "Pass", - "Next": "OpaEval", - "Parameters": { - "JsonInput": { - "Bucket.$": "$.Input.Bucket", - "Key.$": "$.Input.Key", - }, - "OuterEvalEngineSfnExecutionId.$": "$.OuterEvalEngineSfn.ExecutionId", - "ConsumerMetadata.$":"$.ConsumerMetadata", - }, - "ResultPath": "$", - }, - "OpaEval": { - "Type": "Task", - "Next": "GatherInfractions", - "ResultPath": "$.OpaEval", - "Resource": "arn:aws:states:::lambda:invoke", - "Parameters": { - "FunctionName": self.lambda_opa_eval_python_subprocess.function_name, - "Payload": { - "JsonInput.$": "$.JsonInput", - "OpaPolicies": { - "Bucket": self.bucket_opa_policies.bucket_name - }, - }, - }, - "ResultSelector": { - "OpaEvalResults.$": "$.Payload.OpaEvalResults" - }, - }, - 
"GatherInfractions": { - "Type": "Task", - "Next": "ChoiceInfractionsExist", - "ResultPath": "$.GatherInfractions", - "Resource": "arn:aws:states:::lambda:invoke", - "Parameters": { - "FunctionName": self.lambda_gather_infractions.function_name, - "Payload.$": "$.OpaEval.OpaEvalResults", - }, - "ResultSelector": { - "Infractions.$": "$.Payload.Infractions" - }, - }, - "ChoiceInfractionsExist": { - "Type": "Choice", - "Default": "ForEachInfraction", - "Choices": [ - { - "Variable": "$.GatherInfractions.Infractions[0]", - "IsPresent": False, - "Next": "NoInfractions", - } - ], - }, - "NoInfractions": { - "Type": "Succeed", - }, - "ForEachInfraction": { - "Type": "Map", - "Next": "InfractionsExist", - "ResultPath": "$.ForEachInfraction", - "ItemsPath": "$.GatherInfractions.Infractions", - "Parameters": { - "Infraction.$": "$$.Map.Item.Value", - "JsonInput.$": "$.JsonInput", - "OuterEvalEngineSfnExecutionId.$": "$.OuterEvalEngineSfnExecutionId", - "ConsumerMetadata.$": "$.ConsumerMetadata", - }, - "Iterator": { - "StartAt": "HandleInfraction", - "States": { - "HandleInfraction": { - "Type": "Task", - "End": True, - "ResultPath": "$.HandleInfraction", - "Resource": "arn:aws:states:::lambda:invoke", - "Parameters": { - "FunctionName": self.lambda_handle_infraction.function_name, - "Payload": { - "Infraction.$": "$.Infraction", - "JsonInput.$": "$.JsonInput", - "OuterEvalEngineSfnExecutionId.$": "$.OuterEvalEngineSfnExecutionId", - "ConsumerMetadata.$": "$.ConsumerMetadata", - } - }, - "ResultSelector": {"Payload.$": "$.Payload"}, - }, - }, - }, - }, - "InfractionsExist": { - "Type": "Fail", - }, - }, - } - ), - ) - - self.sfn_inner_eval_engine.node.add_dependency(self.role_inner_eval_engine_sfn) - - # CfnOutput(self, "InnerSfnArn", value=self.sfn_inner_eval_engine.attr_arn) - - def deploy_outer_sfn_lambdas(self): - - # write results report - - self.lambda_write_results_report = aws_lambda.Function( - self, - "WriteResultsReport", - runtime=aws_lambda.Runtime.PYTHON_3_9, 
- handler="lambda_function.lambda_handler", - timeout=Duration.seconds(60), - memory_size=1024, - code=aws_lambda.Code.from_asset( - "./supplementary_files/lambdas/write_results_report" - ), - environment={"EvalResultsTable": self.table_eval_results.table_name}, - ) - - self.lambda_write_results_report.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "dynamodb:Query", - ], - resources=[ - self.table_eval_results.table_arn, - f"{self.table_eval_results.table_arn}*", - ], - ) - ) - self.lambda_write_results_report.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "s3:List*", - ], - resources=[self.bucket_eval_results_reports.bucket_arn], - ) - ) - self.lambda_write_results_report.role.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "s3:PutObject", - ], - resources=[ - self.bucket_eval_results_reports.arn_for_objects("*"), - ], - ) - ) - - def deploy_outer_sfn(self): - - log_group_outer_eval_engine_sfn = aws_logs.LogGroup( - self, - "OuterEvalEngineSfnLogs", - log_group_name=f"/aws/vendedlogs/states/OuterEvalEngineSfnLogs-{self.stack_name}", - removal_policy=RemovalPolicy.DESTROY, - ) - - role_outer_eval_engine_sfn = aws_iam.Role( - self, - "OuterEvalEngineSfn", - assumed_by=aws_iam.ServicePrincipal("states.amazonaws.com"), - ) - - role_outer_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - # "logs:*", - "logs:CreateLogDelivery", - "logs:GetLogDelivery", - "logs:UpdateLogDelivery", - "logs:DeleteLogDelivery", - "logs:ListLogDeliveries", - "logs:PutResourcePolicy", - "logs:DescribeResourcePolicies", - "logs:DescribeLogGroups", - ], - resources=[ - "*", - log_group_outer_eval_engine_sfn.log_group_arn, - f"{log_group_outer_eval_engine_sfn.log_group_arn}*", - ], - ) - ) - role_outer_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=[ - "states:StartExecution", - "states:StartSyncExecution", - ], - resources=[self.sfn_inner_eval_engine.attr_arn], - ) - ) - role_outer_eval_engine_sfn.add_to_policy( - 
aws_iam.PolicyStatement( - actions=[ - "lambda:InvokeFunction", - ], - resources=[self.lambda_write_results_report.function_arn], - ) - ) - role_outer_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=["states:DescribeExecution", "states:StopExecution"], - resources=["*"], - ) - ) - role_outer_eval_engine_sfn.add_to_policy( - aws_iam.PolicyStatement( - actions=["events:PutTargets", "events:PutRule", "events:DescribeRule"], - resources=[ - f"arn:aws:events:{os.getenv('CDK_DEFAULT_REGION')}:{os.getenv('CDK_DEFAULT_ACCOUNT')}:rule/StepFunctionsGetEventsForStepFunctionsExecutionRule", - "*", - ], - ) - ) - - self.sfn_outer_eval_engine = aws_stepfunctions.CfnStateMachine( - self, - "OuterEvalEngine", - state_machine_type="EXPRESS", - # state_machine_type="STANDARD", - role_arn=role_outer_eval_engine_sfn.role_arn, - logging_configuration=aws_stepfunctions.CfnStateMachine.LoggingConfigurationProperty( - destinations=[ - aws_stepfunctions.CfnStateMachine.LogDestinationProperty( - cloud_watch_logs_log_group=aws_stepfunctions.CfnStateMachine.CloudWatchLogsLogGroupProperty( - log_group_arn=log_group_outer_eval_engine_sfn.log_group_arn - ) - ) - ], - # include_execution_data=False, - # level="ERROR", - include_execution_data=True, - level="ALL", - ), - definition_string=json.dumps( - { - "StartAt": "ForEachInput", - "States": { - "ForEachInput": { - "Type": "Map", - "Next": "WriteResultsReport", - "ResultPath": "$.ForEachInput", - "ItemsPath": "$.InvokedByApigw.ControlBrokerConsumerInputs.InputKeys", - "Parameters": { - "Input": { - "Bucket.$": "$.InvokedByApigw.ControlBrokerConsumerInputs.Bucket", - "Key.$": "$$.Map.Item.Value", - }, - "ConsumerMetadata.$": "$.InvokedByApigw.ControlBrokerConsumerInputs.ConsumerMetadata", - }, - "Iterator": { - "StartAt": "InvokeInnerEvalEngineSfn", - "States": { - "InvokeInnerEvalEngineSfn": { - "Type": "Task", - "Next": "ChoiceEvalEngineStatus", - "ResultPath": "$.InvokeInnerEvalEngineSfn", - "Resource": 
"arn:aws:states:::aws-sdk:sfn:startSyncExecution", - "Parameters": { - "StateMachineArn": self.sfn_inner_eval_engine.attr_arn, - "Input": { - "Input.$": "$.Input", - "OuterEvalEngineSfn": { - "ExecutionId.$": "$$.Execution.Id" - }, - "ConsumerMetadata.$": "$.ConsumerMetadata", - }, - }, - }, - "ChoiceEvalEngineStatus": { - "Type": "Choice", - "Default": "InnerSfnFailed", - "Choices": [ - { - "Variable": "$.InvokeInnerEvalEngineSfn.Status", - "StringEquals": "SUCCEEDED", - "Next": "InnerSfnSucceeded", - } - ], - }, - "InnerSfnFailed": {"Type": "Pass", "End": True}, - "InnerSfnSucceeded": {"Type": "Pass", "End": True}, - }, - }, - }, - "WriteResultsReport": { - "Type": "Task", - "End": True, - "ResultPath": "$.WriteResultReport", - "Resource": "arn:aws:states:::lambda:invoke", - "Parameters": { - "FunctionName": self.lambda_write_results_report.function_name, - "Payload": { - "OuterEvalEngineSfnExecutionId.$": "$$.Execution.Id", - "ResultsReportS3Uri.$":"$.ResultsReportS3Uri", - "ForEachInput.$":"$.ForEachInput", - } - }, - "ResultSelector": {"Payload.$": "$.Payload"}, - }, - }, - } - ), - ) - - self.sfn_outer_eval_engine.node.add_dependency(role_outer_eval_engine_sfn) diff --git a/stacks/client_stack.py b/stacks/client_stack.py index 3d6a2bae..6b5467ac 100644 --- a/stacks/client_stack.py +++ b/stacks/client_stack.py @@ -1,6 +1,7 @@ import os import json from typing import List, Sequence +from os import path from aws_cdk import ( Duration, @@ -138,7 +139,7 @@ def apigw(self): # authorizer=authorizer_iam ) - self.apigw_full_invoke_url = f'{self.http_api.url[:-1]}{self.path}' # remove duplicate slash + self.apigw_full_invoke_url = path.join(self.http_api.url.rstrip("/"),self.path.strip('/')) CfnOutput(self, "ApigwInvokeUrl", value=self.apigw_full_invoke_url) diff --git a/supplementary_files/lambdas/convert_config_to_cfn_cloudcontrol/lambda_function.py b/supplementary_files/lambdas/convert_config_to_cfn_cloudcontrol/lambda_function.py index ac34f9f8..8d11d7dc 100644 --- 
a/supplementary_files/lambdas/convert_config_to_cfn_cloudcontrol/lambda_function.py +++ b/supplementary_files/lambdas/convert_config_to_cfn_cloudcontrol/lambda_function.py @@ -8,18 +8,18 @@ class CloudControl(): - def __init__(self,TypeName,Identifier): - self.type_name = TypeName - self.identifier = Identifier + def __init__(self,type_name,identifier): + self.type_name = type_name + self.identifier = identifier - def get_resource_schema(self,*,ResourceType): + def get_resource_schema(self,*,resource_type): try: r = cfn.describe_type( Type = 'RESOURCE', - TypeName = ResourceType, + TypeName = resource_type, ) except cfn.exceptions.TypeNotFoundException: - print(f'TypeNotFoundException: {ResourceType}') + print(f'TypeNotFoundException: {resource_type}') return None except ClientError as e: raise @@ -28,25 +28,25 @@ def get_resource_schema(self,*,ResourceType): print(schema) return schema - def cloudcontrol_get(self,*,TypeName,Identifier): + def cloudcontrol_get(self,*,type_name,identifier): try: r = cloudcontrol.get_resource( - TypeName = TypeName, - Identifier = Identifier + TypeName = type_name, + Identifier = identifier ) except ClientError as e: print(f'ClientError\n{e}') raise else: properties = json.loads(r['ResourceDescription']['Properties']) - print(f'cloudcontrol.get_resource properties\nTypeName:\n{TypeName}\nIdentifier:\n{Identifier}\nProperties:\n{properties}') + print(f'cloudcontrol.get_resource properties\ntype_name:\n{type_name}\nidentifier:\n{identifier}\nProperties:\n{properties}') return properties def get_cfn(self): - cloudcontrol_properties = self.cloudcontrol_get(TypeName=self.type_name,Identifier=self.identifier) + cloudcontrol_properties = self.cloudcontrol_get(type_name=self.type_name,identifier=self.identifier) - resource_schema = self.get_resource_schema(ResourceType=self.type_name) + resource_schema = self.get_resource_schema(resource_type=self.type_name) print(f'resource_schema:\n{resource_schema}') schema_properties = 
resource_schema['properties'] @@ -89,7 +89,7 @@ def lambda_handler(event, context): item_status = configuration_item["configurationItemStatus"] print(f'item_status:\n{item_status}') - resource_type = configuration_item['resourceType'] + resource_type = configuration_item['resourceType'] print(f'resource_type:\n{resource_type}') resource_configuration = configuration_item['configuration'] @@ -103,7 +103,7 @@ def lambda_handler(event, context): # only 506/874 resources can use CloudControl, by a recent count of NON_PROVISIONABLE status - c = CloudControl(TypeName=resource_type,Identifier=resource_id) + c = CloudControl(type_name=resource_type,identifier=resource_id) cfn = c.get_cfn() diff --git a/supplementary_files/lambdas/convert_config_to_cfn_describe_type/lambda_function.py b/supplementary_files/lambdas/convert_config_to_cfn_describe_type/lambda_function.py index 49589f22..ce8bbd4d 100644 --- a/supplementary_files/lambdas/convert_config_to_cfn_describe_type/lambda_function.py +++ b/supplementary_files/lambdas/convert_config_to_cfn_describe_type/lambda_function.py @@ -26,9 +26,9 @@ class AwsResource(): - def __init__(self,ResourceType,ResourceId): - self.resource_type = ResourceType - self.resource_id = ResourceId + def __init__(self,resource_type,resource_id): + self.resource_type = resource_type + self.resource_id = resource_id self.api_calls = { "AWS::SNS::Topic": { @@ -46,20 +46,20 @@ def main(self): class DescribeType(): - def __init__(self,TypeName,ResourceConfiguration,ResourceId): - self.type_name = TypeName - self.resource_configuration = ResourceConfiguration - self.resource_id = ResourceId + def __init__(self,type_name,resource_configuration,resource_id): + self.type_name = type_name + self.resource_configuration = resource_configuration + self.resource_id = resource_id - def get_resource_schema(self,*,TypeName): + def get_resource_schema(self,*,type_name): try: r = cfn.describe_type( Type = 'RESOURCE', - TypeName = TypeName, + TypeName = type_name, ) 
except cfn.exceptions.TypeNotFoundException: - print(f'\nTypeNotFoundException: {TypeName}') + print(f'\nTypeNotFoundException: {type_name}') return None except ClientError as e: raise @@ -81,7 +81,7 @@ def format_to_cfn(Input): else: return Input - resource_schema = self.get_resource_schema(TypeName=self.type_name) + resource_schema = self.get_resource_schema(type_name=self.type_name) print(f'\nresource_schema:\n') pp(resource_schema) @@ -178,12 +178,12 @@ def lambda_handler(event, context): print(f'\nresource_configuration_keys:\n') pp(resource_configuration_keys) - resource_type = configuration_item['resourceType'] - resource_id = configuration_item['resourceId'] + resource_type = configuration_item['resourceType'] + resource_id = configuration_item['resourceId'] # only 506/874 resources can use CloudControl, by a recent count of NON_PROVISIONABLE status - # d = DescribeType(TypeName=resource_type,ResourceConfiguration=resource_configuration,ResourceId=resource_id) + # d = DescribeType(type_name=resource_type,resource_configuration=resource_configuration,resource_id=resource_id) # cfn = d.get_cfn() @@ -194,7 +194,7 @@ def lambda_handler(event, context): # "Cfn": cfn # } - a = AwsResource(ResourceType=resource_type,ResourceId=resource_id) + a = AwsResource(resource_type=resource_type,resource_id=resource_id) a.main() ######################################################## diff --git a/supplementary_files/lambdas/custom_config/lambda_function.py b/supplementary_files/lambdas/custom_config/lambda_function.py index 476b6889..ce7ed22a 100644 --- a/supplementary_files/lambdas/custom_config/lambda_function.py +++ b/supplementary_files/lambdas/custom_config/lambda_function.py @@ -8,9 +8,9 @@ config = boto3.client("config") -def sync_sfn(*, SfnArn, Input: dict): +def sync_sfn(*, sfn_arn, input: dict): try: - r = sfn.start_sync_execution(stateMachineArn=SfnArn, input=json.dumps(Input)) + r = sfn.start_sync_execution(stateMachineArn=sfn_arn, input=json.dumps(input)) 
except ClientError as e: print(f"ClientError\n{e}") raise @@ -25,9 +25,9 @@ def sync_sfn(*, SfnArn, Input: dict): return output -def async_sfn(*, SfnArn, Input: dict): +def async_sfn(*, sfn_arn, input: dict): try: - r = sfn.start_execution(stateMachineArn=SfnArn, input=json.dumps(Input)) + r = sfn.start_execution(stateMachineArn=sfn_arn, input=json.dumps(input)) except ClientError as e: print(f"ClientError\n{e}") raise @@ -126,7 +126,7 @@ def lambda_handler(event, context): result_token = event["resultToken"] print(f"result_token:\n{result_token}") - processed = sync_sfn(SfnArn=os.environ["ProcessingSfnArn"], Input={"Config": event}) + processed = sync_sfn(sfn_arn=os.environ["ProcessingSfnArn"], input={"Config": event}) print(f"processed:\n{processed}") # return to Config compliance status - let Config notify diff --git a/supplementary_files/lambdas/handle_infraction/lambda_function.py b/supplementary_files/lambdas/handle_infraction/lambda_function.py index ac1e4a66..a63169c2 100644 --- a/supplementary_files/lambdas/handle_infraction/lambda_function.py +++ b/supplementary_files/lambdas/handle_infraction/lambda_function.py @@ -8,10 +8,10 @@ eb = boto3.client('events') def update_item(*, - Table, - Pk, - Sk, - Attributes:dict[str,str] + table, + pk, + sk, + attributes:dict[str,str] ): def ddb_compatible_type(Item): @@ -20,13 +20,13 @@ def ddb_compatible_type(Item): else: return Item - table = ddb.Table(Table) + table = ddb.Table(table) expression_attribute_values = {} update_expressions = [] - for index, (key, value) in enumerate(Attributes.items()): + for index, (key, value) in enumerate(attributes.items()): placeholder = f':{chr(97+index)}' @@ -41,8 +41,8 @@ def ddb_compatible_type(Item): try: r = table.update_item( Key = { - 'pk':Pk, - 'sk':Sk + 'pk':pk, + 'sk':sk }, UpdateExpression = update_expression, ExpressionAttributeValues = expression_attribute_values @@ -56,18 +56,18 @@ def ddb_compatible_type(Item): return True def put_event_entry(*, - EventBusName, - 
Source, - Detail:dict + event_bus_name, + source, + detail:dict ): try: r = eb.put_events( Entries = [ { - 'EventBusName':EventBusName, - 'Detail':json.dumps(Detail), + 'EventBusName':event_bus_name, + 'Detail':json.dumps(detail), 'DetailType':os.environ.get('AWS_LAMBDA_FUNCTION_NAME'), - 'Source':Source, + 'Source':source, } ] ) @@ -98,18 +98,18 @@ def lambda_handler(event, context): # to ddb update = update_item( - Table = os.environ['TableName'], - Pk = outer_eval_enginge_sfn_execution_id, - Sk = sk, - Attributes = consumer_metadata + table = os.environ['TableName'], + pk = outer_eval_enginge_sfn_execution_id, + sk = sk, + attributes = consumer_metadata ) # to eb put = put_event_entry( - EventBusName = os.environ.get('EventBusName'), - Source = outer_eval_enginge_sfn_execution_id, - Detail = { + event_bus_name = os.environ.get('EventBusName'), + source = outer_eval_enginge_sfn_execution_id, + detail = { 'Infraction':event.get('Infraction'), 'ConsumerMetadata':consumer_metadata, 'OuterEvalEngineSfnExecutionId':event.get('OuterEvalEngineSfnExecutionId') diff --git a/supplementary_files/lambdas/invoked_by_apigw/lambda_function.py b/supplementary_files/lambdas/invoked_by_apigw/lambda_function.py index e7d2ae35..53b9badf 100644 --- a/supplementary_files/lambdas/invoked_by_apigw/lambda_function.py +++ b/supplementary_files/lambdas/invoked_by_apigw/lambda_function.py @@ -9,22 +9,22 @@ sfn = boto3.client('stepfunctions') -def extract_acces_key_id(*,Aws4Authorization): - m = re.search('AWS4-HMAC-SHA256 Credential=(\w*)/.*',Aws4Authorization) +def extract_acces_key_id(*,aws4_authorization): + m = re.search('AWS4-HMAC-SHA256 Credential=(\w*)/.*',aws4_authorization) return m.group(1) def generate_uuid(): return str(uuid.uuid4()) -def get_result_report_s3_uri(*,EvalResultsReportsBucket): +def get_result_report_s3_uri(*,eval_results_reports_bucket): uuid = generate_uuid() - s3_uri = f's3://{EvalResultsReportsBucket}/cb-{uuid}' + s3_uri = 
f's3://{eval_results_reports_bucket}/cb-{uuid}' return s3_uri -def get_requestor_authorization_status(*,AuthorizationHeader): +def get_requestor_authorization_status(*,authorization_header): return True # TODO @@ -32,14 +32,14 @@ def get_eval_engine_read_access_to_inputs_status(): return True #TODO -def async_sfn(*, SfnArn, Input: dict): +def async_sfn(*, sfn_arn, input: dict): try: - r = sfn.start_execution(stateMachineArn=SfnArn, input=json.dumps(Input)) + r = sfn.start_execution(stateMachineArn=sfn_arn, input=json.dumps(input)) except ClientError as e: print(f"ClientError\n{e}") raise else: - print(f'no ClientError start_execution:\nSfnArn:\n{SfnArn}\nInput:\n{Input}') + print(f'no ClientError start_execution:\nsfn_arn:\n{sfn_arn}\ninput:\n{input}') return r["executionArn"] @@ -49,7 +49,7 @@ def lambda_handler(event,context): post_request_json_body = json.loads(event['body']) - eval_engine_sfn_arn = os.environ.get('ControlBrokerOuterSfnArn') + eval_engine_sfn_arn = os.environ.get('ControlBrokerOuterSfnArn') print(f'eval_engine_sfn_arn:\n{eval_engine_sfn_arn}') eval_results_reports_bucket = os.environ.get('ControlBrokerEvalResultsReportsBucket') @@ -62,7 +62,7 @@ def lambda_handler(event,context): print(f'authorization_header:\n{authorization_header}') result_report_s3_path = get_result_report_s3_uri( - EvalResultsReportsBucket = eval_results_reports_bucket + eval_results_reports_bucket = eval_results_reports_bucket ) eval_engine_sfn_input = { @@ -71,13 +71,13 @@ def lambda_handler(event,context): } eval_engine_sfn_execution_arn = async_sfn( - SfnArn = eval_engine_sfn_arn, - Input = eval_engine_sfn_input + sfn_arn = eval_engine_sfn_arn, + input = eval_engine_sfn_input ) control_broker_request_status = { - "RequestorIsAuthorized": get_requestor_authorization_status(AuthorizationHeader=authorization_header), - "EvalEngineHasReadAccessToInputs": get_eval_engine_read_access_to_inputs_status(), + "RequestorIsAuthorized": 
get_requestor_authorization_status(authorization_header=authorization_header), + "EvalEngineHasReadAccessToInputs": get_eval_engine_read_access_to_inputs_status(), "ResultsReportS3Uri": result_report_s3_path, "EvalEngineSfnExecutionArn": eval_engine_sfn_execution_arn } diff --git a/supplementary_files/lambdas/opa_eval/python_subprocess/lambda_function.py b/supplementary_files/lambdas/opa_eval/python_subprocess/lambda_function.py index 2de1cee0..37afda59 100644 --- a/supplementary_files/lambdas/opa_eval/python_subprocess/lambda_function.py +++ b/supplementary_files/lambdas/opa_eval/python_subprocess/lambda_function.py @@ -10,52 +10,52 @@ s3 = boto3.client('s3') s3r = boto3.resource('s3') -def s3_download(*,Bucket,Key,LocalPath): +def s3_download(*,bucket,key,local_path): try: s3.download_file( - Bucket, - Key, - LocalPath + bucket, + key, + local_path ) except ClientError as e: - print(f'ClientError:\nBucket: {Bucket}\nKey: {Key}\n{e}') + print(f'ClientError:\nbucket: {bucket}\nkey: {key}\n{e}') raise else: - print(f'No ClientError download_file\nBucket:\n{Bucket}\nKey:\n{Key}') + print(f'No ClientError download_file\nbucket:\n{bucket}\nkey:\n{key}') return True -def s3_download_dir(*,Bucket, Prefix=None, LocalPath): - print(f'Begin s3_download_dir\nBucket:\n{Bucket}\nPrefix:\n{Prefix}\nLocalPath:\n{LocalPath}') +def s3_download_dir(*,bucket, prefix=None, local_path): + print(f'Begin s3_download_dir\nbucket:\n{bucket}\nprefix:\n{prefix}\nlocal_path:\n{local_path}') paginator = s3.get_paginator('list_objects') - if Prefix: - pagination = paginator.paginate(Bucket=Bucket, Delimiter='/', Prefix=Prefix) + if prefix: + pagination = paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix) else: - pagination = paginator.paginate(Bucket=Bucket, Delimiter='/') + pagination = paginator.paginate(Bucket=bucket, Delimiter='/') for result in pagination: if result.get('CommonPrefixes') is not None: - for subdir in result.get('CommonPrefixes'): + for subdir in 
result.get('Commonprefixes'): s3_download_dir( - Prefix = subdir.get('Prefix'), - LocalPath = LocalPath, - Bucket = Bucket + prefix = subdir.get('Prefix'), + local_path = local_path, + bucket = bucket ) for file in result.get('Contents', []): - dest_pathname = os.path.join(LocalPath, file.get('Key')) + dest_pathname = os.path.join(local_path, file.get('Key')) if not os.path.exists(os.path.dirname(dest_pathname)): os.makedirs(os.path.dirname(dest_pathname)) if not file.get('Key').endswith('/'): s3_download( - Bucket=Bucket, - Key=file.get('Key'), - LocalPath=dest_pathname + bucket=bucket, + key=file.get('Key'), + local_path=dest_pathname ) -def run_bash(*, BashPath): - subprocess.run(["chmod","u+rx", BashPath]) - output = subprocess.run(["sh", f"{BashPath}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +def run_bash(*, bash_path): + subprocess.run(["chmod","u+rx", bash_path]) + output = subprocess.run(["sh", f"{bash_path}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print('raw subprocess output:') print(output) print('stdout:') @@ -67,17 +67,17 @@ def run_bash(*, BashPath): 'stderr': stderr } -def re_search(RegexGroup,SearchMe): - m = re.search(RegexGroup,SearchMe) +def re_search(regex_group,search_me): + m = re.search(regex_group,search_me) try: return m.group(1) except AttributeError: - print(f'Regex:\n{RegexGroup}') - print(f'SearchMe:\n{SearchMe}') + print(f'Regex:\n{regex_group}') + print(f'search_me:\n{search_me}') raise -def mkdir(Dir): - p = Path(Dir) +def mkdir(dir_): + p = Path(dir_) p.mkdir(parents=True,exist_ok=True) return str(p) @@ -97,8 +97,8 @@ def lambda_handler(event, context): print(f'begin: Get Policies') s3_download_dir( - Bucket = opa_policies_bucket, - LocalPath = policy_path_root + bucket = opa_policies_bucket, + local_path = policy_path_root ) # get json_input @@ -108,9 +108,9 @@ def lambda_handler(event, context): print(f'begin: Get json_input') s3_download( - Bucket = json_input['Bucket'], - Key = json_input['Key'], - LocalPath = 
json_input_path + bucket = json_input['Bucket'], + key = json_input['Key'], + local_path = json_input_path ) # to tmp @@ -123,7 +123,7 @@ def lambda_handler(event, context): # # eval - opa_eval_result = run_bash(BashPath='/tmp/opa-eval.sh') + opa_eval_result = run_bash(bash_path='/tmp/opa-eval.sh') print(f'eval_result:\n{opa_eval_result}\n{type(opa_eval_result)}') diff --git a/supplementary_files/lambdas/s3_head_object/lambda_function.py b/supplementary_files/lambdas/s3_head_object/lambda_function.py index 68d567a3..b22b61c9 100644 --- a/supplementary_files/lambdas/s3_head_object/lambda_function.py +++ b/supplementary_files/lambdas/s3_head_object/lambda_function.py @@ -4,12 +4,12 @@ s3 = boto3.client('s3') -def object_exists(*,Bucket,Key): - print(f'trying head_object\nbucket:\n{Bucket}\nkey:\n{Key}') +def object_exists(*,bucket,key): + print(f'trying head_object\nbucket:\n{bucket}\nkey:\n{key}') try: r = s3.head_object( - Bucket = Bucket, - Key = Key + Bucket = bucket, + Key = key ) except ClientError as e: print(e) @@ -23,8 +23,8 @@ def object_exists(*,Bucket,Key): else: return True -def s3_uri_to_bucket_key(*,Uri): - path_parts=Uri.replace("s3://","").split("/") +def s3_uri_to_bucket_key(*,uri): + path_parts=uri.replace("s3://","").split("/") bucket=path_parts.pop(0) key="/".join(path_parts) return bucket, key @@ -40,9 +40,9 @@ class ObjectDoesNotExistException(Exception): key = event.get('Key') if not bucket and not key: - bucket, key = s3_uri_to_bucket_key(Uri=event['S3Uri']) + bucket, key = s3_uri_to_bucket_key(uri=event['S3Uri']) - existance = object_exists(Bucket=bucket,Key=key) + existance = object_exists(bucket=bucket,key=key) print(f'existance:\n{existance}') if not existance: diff --git a/supplementary_files/lambdas/s3_put_object/lambda_function.py b/supplementary_files/lambdas/s3_put_object/lambda_function.py index 7c1760c0..a9d026bf 100644 --- a/supplementary_files/lambdas/s3_put_object/lambda_function.py +++ 
b/supplementary_files/lambdas/s3_put_object/lambda_function.py @@ -5,11 +5,11 @@ s3 = boto3.client('s3') -def put_object(Bucket,Key,Object:dict): - print(f'put_object\nBucket:\n{Bucket}\nKey:\n{Key}') +def put_object(bucket,Key,Object:dict): + print(f'put_object\nbucket:\n{bucket}\nKey:\n{Key}') try: r = s3.put_object( - Bucket = Bucket, + Bucket = bucket, Key = Key, Body = json.dumps(Object) ) @@ -38,7 +38,7 @@ def lambda_handler(event,context): bucket, key = s3_uri_to_bucket_key(Uri=event['S3Uri']) put_object( - Bucket = bucket, + bucket = bucket, Key = key, Object = event['Object'] ) diff --git a/supplementary_files/lambdas/s3_select/lambda_function.py b/supplementary_files/lambdas/s3_select/lambda_function.py index 69ef80c5..3559b0dd 100644 --- a/supplementary_files/lambdas/s3_select/lambda_function.py +++ b/supplementary_files/lambdas/s3_select/lambda_function.py @@ -6,19 +6,19 @@ s3 = boto3.client('s3') def s3_select_to_file( - Bucket, - Key, - Expression, - Outfile + bucket, + key, + expression, + outfile ): - print(f'select_object_content\nBucket:\n{Bucket}\nKey:\n{Key}') + print(f'select_object_content\nbucket:\n{bucket}\nkey:\n{key}') try: r = s3.select_object_content( - Bucket=Bucket, - Key=Key, - Expression=Expression, + Bucket=bucket, + Key=key, + Expression=expression, ExpressionType='SQL', InputSerialization={ 'JSON': { @@ -37,7 +37,7 @@ def s3_select_to_file( else: event_stream = r['Payload'] end_event_received = False - with open(Outfile, 'wb') as f: + with open(outfile, 'wb') as f: # Iterate over events in the event stream as they come for event in event_stream: # If we received a records event, write the data to a file @@ -73,10 +73,10 @@ def lambda_handler(event,context): bucket, key = s3_uri_to_bucket_key(Uri=event['S3Uri']) s3_select_to_file( - Bucket = bucket, - Key = key, - Expression = event['Expression'], - Outfile = select_outfile + bucket = bucket, + key = key, + expression = event['Expression'], + outfile = select_outfile ) with 
open(select_outfile,'r') as f: diff --git a/supplementary_files/lambdas/sign_apigw_request/lambda_function.py b/supplementary_files/lambdas/sign_apigw_request/lambda_function.py index 59cc4553..aa8b574a 100644 --- a/supplementary_files/lambdas/sign_apigw_request/lambda_function.py +++ b/supplementary_files/lambdas/sign_apigw_request/lambda_function.py @@ -12,8 +12,8 @@ region = session.region_name account_id = boto3.client('sts').get_caller_identity().get('Account') -def get_host(*,FullInvokeUrl): - m = re.search('https://(.*)/.*',FullInvokeUrl) +def get_host(*,full_invoke_url): + m = re.search('https://(.*)/.*',full_invoke_url) return m.group(1) def lambda_handler(event,context): @@ -22,7 +22,7 @@ def lambda_handler(event,context): full_invoke_url = os.environ.get('ApigwInvokeUrl') - host = get_host(FullInvokeUrl=full_invoke_url) + host = get_host(full_invoke_url=full_invoke_url) auth = BotoAWSRequestsAuth( aws_host= host, diff --git a/supplementary_files/lambdas/write_results_report/lambda_function.py b/supplementary_files/lambdas/write_results_report/lambda_function.py index e47c50a4..707f94eb 100644 --- a/supplementary_files/lambdas/write_results_report/lambda_function.py +++ b/supplementary_files/lambdas/write_results_report/lambda_function.py @@ -7,40 +7,40 @@ ddb = boto3.client('dynamodb') s3 = boto3.client('s3') -def put_object(*,S3Uri,Dict): +def put_object(*,s3_uri,dict_): - def s3_uri_to_bucket_key(*,Uri): - path_parts=Uri.replace("s3://","").split("/") + def s3_uri_to_bucket_key(*,s3_uri): + path_parts=s3_uri.replace("s3://","").split("/") bucket=path_parts.pop(0) key="/".join(path_parts) return bucket, key - bucket, key = s3_uri_to_bucket_key(Uri=S3Uri) + bucket, key = s3_uri_to_bucket_key(s3_uri=s3_uri) try: r = s3.put_object( Bucket = bucket, Key = key, - Body = json.dumps(Dict) + Body = json.dumps(dict_) ) except ClientError as e: print(f'ClientError:\n{e}') raise else: - print(f'no ClientError s3.put_object()\nS3Uri:\n{S3Uri}') + print(f'no 
ClientError s3.put_object()\s3_uri:\n{s3_uri}') return True def simple_pk_query(*, - Table, - Pk, - AllStringAttributes = True + table, + pk, + all_string_attributes = True ): try: r = ddb.query( - TableName=Table, + TableName=table, ExpressionAttributeValues = { ":pk": { - "S": Pk + "S": pk } }, KeyConditionExpression="pk = :pk" @@ -48,20 +48,20 @@ def simple_pk_query(*, except ClientError as e: raise else: - print(f'no ClientError ddb.query()\nTable:\n{Table}\nPk:\n{Pk}') + print(f'no ClientError ddb.query()\nTable:\n{table}\nPk:\n{pk}') print(r) items = r['Items'] - if AllStringAttributes: + if all_string_attributes: items = [{k:i[k]['S'] for k in i} for i in items] print(items) return items -def determine_compliance(*,InfractionItems,AllNestedSfnsSucceeded): - no_infractions = not bool(InfractionItems) +def determine_compliance(*,infraction_items,all_nested_sfns_succeeded): + no_infractions = not bool(infraction_items) - return no_infractions and AllNestedSfnsSucceeded + return no_infractions and all_nested_sfns_succeeded def determine_if_all_nested_sfns_succeeded(*,NestedSfns): return not bool([i for i in NestedSfns if i['InvokeInnerEvalEngineSfn']['Status']!='SUCCEEDED']) @@ -84,13 +84,13 @@ def lambda_handler(event, context): print(f'eval_results_table:\n{eval_results_table}') infraction_items = simple_pk_query( - Table = eval_results_table, - Pk = sfn_exec_id + table = eval_results_table, + pk = sfn_exec_id ) compliance = determine_compliance( - InfractionItems = infraction_items, - AllNestedSfnsSucceeded = all_nested_sfns_succeeded + infraction_items = infraction_items, + all_nested_sfns_succeeded = all_nested_sfns_succeeded ) print(f'compliance:\n{compliance}') @@ -116,8 +116,8 @@ def lambda_handler(event, context): print(f'eval_results_report:\n{eval_results_report}') put_object( - S3Uri = event['ResultsReportS3Uri'], - Dict = eval_results_report + s3_uri = event['ResultsReportS3Uri'], + dict_ = eval_results_report ) return True \ No newline at end of 
file