diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 492228d0..9394694a 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,7 +1,7 @@ [bumpversion] commit = False tag = True -current_version = 1.1.74 +current_version = 1.1.146 tag_name = v{current_version} message = GitHub Actions Build {current_version} diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..9671ab52 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,29 @@ +## Type of change + +- [ ] Refactor +- [ ] New feature +- [ ] Bug fix +- [ ] Optimization +- [ ] Documentation Update + +## Description + + + +## Related Tickets & Documents + +- Related Issue # +- Closes # + +## Checklist before requesting a review + +- [ ] I have performed a self-review of my code. +- [ ] If it is a core feature, I have added thorough tests. + +## Testing +- Are the below tests passing? + - [ ] UnitTest + - [ ] IntegrationTest +- Please describe the System Under Test. +- Please provide detailed steps to perform tests related to this code change. +- How were the fixes/results from this change verified? Please provide relevant screenshots or results. \ No newline at end of file diff --git a/.github/workflows/Build.yml b/.github/workflows/Build.yml index d615aaa4..4544a957 100644 --- a/.github/workflows/Build.yml +++ b/.github/workflows/Build.yml @@ -7,6 +7,10 @@ on: push: branches: [ main ] +concurrency: + group: merge-queue + cancel-in-progress: false + jobs: unittest: name: unittest @@ -23,6 +27,7 @@ jobs: - name: Install dependencies run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y python -m pip install --upgrade pip pip install flake8 pytest pytest-cov @@ -131,6 +136,7 @@ jobs: - name: Install dependencies run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y python -m pip install --upgrade pip pip install flake8 pytest pytest-cov @@ -148,6 +154,13 @@ jobs: aws-access-key-id: ${{ secrets.ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SECRET_ACCESS_KEY }} aws-region: ${{ secrets.REGION }} + - name: Set GCP credentials for pytest + env: + GOOGLE_APPLICATION_CREDENTIALS_CONTENTS: ${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }} + RUNNER_PATH: ${{ secrets.RUNNER_PATH }} + run: | + echo "$GOOGLE_APPLICATION_CREDENTIALS_CONTENTS" > "$RUNNER_PATH/gcp_service.json" + echo "GOOGLE_APPLICATION_CREDENTIALS=$RUNNER_PATH/gcp_service.json" >> "$GITHUB_ENV" - name: 📃 Integration tests with pytest env: BUCKET: ${{ secrets.BUCKET }} @@ -158,6 +171,8 @@ jobs: AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} + GCP_DATABASE_NAME: ${{ secrets.GCP_DATABASE_NAME }} + GCP_DATABASE_TABLE_NAME: ${{ secrets.GCP_DATABASE_TABLE_NAME }} run: | pytest -v tests/integration --cov=cloud_governqance --cov-report=term-missing coverage run -m pytest -v tests/integration @@ -252,6 +267,7 @@ jobs: TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y echo '⌛ Wait till package will be updated in PyPI' # Verfiy and wait till latest cloud-governance version will be updated in Pypi (timeout 900 seconds) diff --git a/.github/workflows/PR.yml b/.github/workflows/PR.yml index fa62628a..6ac2d3eb 100644 --- a/.github/workflows/PR.yml +++
b/.github/workflows/PR.yml @@ -5,15 +5,24 @@ name: PR on: pull_request_target: + types: [labeled, synchronize] branches: [ main ] jobs: + approve: # First step + runs-on: ubuntu-latest + steps: + - name: Approve + run: echo For security reasons, all pull requests need to be approved first before running any automated CI. unittest: name: unittest runs-on: ubuntu-latest + needs: [approve] strategy: matrix: python-version: [ '3.8', '3.9', '3.10' ] + # minimize potential vulnerabilities + if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }} steps: - uses: actions/checkout@v3 with: @@ -25,6 +34,7 @@ jobs: - name: Install dependencies run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y python -m pip install --upgrade pip pip install flake8 pytest pytest-cov @@ -51,7 +61,7 @@ jobs: terraform_apply: name: terraform_apply - needs: [unittest] + needs: [unittest, approve] runs-on: ubuntu-latest outputs: INSTANCE_ID: ${{ steps.terraform_instance_id.outputs.INSTANCE_ID }} @@ -99,7 +109,7 @@ jobs: integration: name: integration - needs: [ unittest, terraform_apply ] + needs: [ unittest, terraform_apply, approve ] runs-on: ubuntu-latest strategy: max-parallel: 1 @@ -129,6 +139,7 @@ jobs: - name: Install dependencies run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y python -m pip install --upgrade pip pip install flake8 pytest pytest-cov @@ -146,6 +157,13 @@ jobs: aws-access-key-id: ${{ secrets.ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SECRET_ACCESS_KEY }} aws-region: ${{ secrets.REGION }} + - name: Set GCP credentials for pytest + env: + GOOGLE_APPLICATION_CREDENTIALS_CONTENTS: ${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }} + RUNNER_PATH: ${{ secrets.RUNNER_PATH }} + run: | + echo "$GOOGLE_APPLICATION_CREDENTIALS_CONTENTS" > "$RUNNER_PATH/gcp_service.json" + echo "GOOGLE_APPLICATION_CREDENTIALS=$RUNNER_PATH/gcp_service.json" >> "$GITHUB_ENV" - name: 📃 Integration tests with pytest env: BUCKET: ${{ secrets.BUCKET }} @@ -156,12 +174,14 @@ jobs: AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} + GCP_DATABASE_NAME: ${{ secrets.GCP_DATABASE_NAME }} + GCP_DATABASE_TABLE_NAME: ${{ secrets.GCP_DATABASE_TABLE_NAME }} run: | python -m pytest -v tests/integration terraform_destroy: name: terraform_destroy - needs: [unittest, terraform_apply, integration] + needs: [unittest, terraform_apply, integration, approve] if: success() || failure() runs-on: ubuntu-latest steps: @@ -203,7 +223,7 @@ jobs: e2e: name: e2e - needs: [ unittest, terraform_apply, integration ] + needs: [ unittest, terraform_apply, integration, approve ] runs-on: ubuntu-latest strategy: matrix: @@ -219,6 +239,7 @@ jobs: - name: Install dependencies run: | # ldap requirements + sudo apt update -y sudo apt-get install build-essential python3-dev libldap2-dev libsasl2-dev vim -y python -m pip install --upgrade pip pip install flake8 pytest pytest-cov diff --git a/.gitignore b/.gitignore index 8e2bc00c..a3a72c40 100644 --- a/.gitignore +++ b/.gitignore @@ -216,3 +216,4 @@ empty_test_environment_variables.py /cloud_governance/main/.env /cloud_governance/main/.test_env /cloud_governance/policy/send_mail.py +cloudsensei/.env.txt diff --git a/MANIFEST.in b/MANIFEST.in index b43e57ca..d46c12c1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,3 @@ include 
cloud_governance/policy/*.yml include iam/cloud/azure/CloudGovernanceCostManagement.json - - - +include cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2 diff --git a/README.md b/README.md index a2f9e6fd..49d8f057 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ This tool support the following policies: * [s3_inactive](cloud_governance/policy/aws/s3_inactive.py): Get the inactive/empty buckets and delete them after 7 days. * [empty_roles](cloud_governance/policy/aws/empty_roles.py): Get empty roles and delete it after 7 days. * [zombie_snapshots](cloud_governance/policy/aws/zombie_snapshots.py): Get the zombie snapshots and delete it after 7 days. -* [nat_gateway_unused](cloud_governance/policy/aws/nat_gateway_unused.py): Get the unused nat gateways and deletes it after 7 days. +* [nat_gateway_unused](cloud_governance/policy/aws/unused_nat_gateway.py): Get the unused nat gateways and delete them after 7 days. * gitleaks: scan Github repository git leak (security scan) * [cost_over_usage](cloud_governance/policy/aws/cost_over_usage.py): send mail to aws user if over usage cost diff --git a/aws_lambda_functions/CloudResourceOrchestration/lambda_function.py b/aws_lambda_functions/CloudResourceOrchestration/lambda_function.py new file mode 100644 index 00000000..06fd1bea --- /dev/null +++ b/aws_lambda_functions/CloudResourceOrchestration/lambda_function.py @@ -0,0 +1,101 @@ +import json +import boto3 +import jira + +ssm_client = boto3.client('ssm', region_name='us-east-1') +APPROVED = 'APPROVED' +REJECT = 'REJECT' +REFINEMENT = '61' +CLOSED = '41' +JIRA_TOKEN = 'JIRA_TOKEN' +JIRA_PROJECT = 'JIRA_PROJECT' +JIRA_API_SERVER = 'JIRA_API_SERVER' +CRO_ADMINS = ['athiruma@redhat.com', 'natashba@redhat.com', 'ebattat@redhat.com'] + + +def get_receive_mail_details(event_data): + """ + This method returns the received mail data + :param event_data: + :return: + """ + records = event_data.get('Records') + mail_details = [] + if records: + for record in records: + if record.get('eventSource') == 'aws:ses': + ses_data = record.get('ses') + common_headers = ses_data.get('mail', {}).get('commonHeaders', {}) + if common_headers: + mail_details.append({ + 'from': ses_data.get('mail', {}).get('source'), + 'to': common_headers.get('to'), + 'subject': common_headers.get('subject') + }) + return mail_details + + +def lambda_handler(event, context): + """ + This lambda function approves the user budget request via a received email + :param event: + :param context: + :return: + """ + try: + parameters = ssm_client.get_parameters(Names=[JIRA_TOKEN, JIRA_PROJECT, JIRA_API_SERVER], WithDecryption=True)['Parameters'] + if parameters: + output_parameters = {} + for parameter in parameters: + output_parameters[parameter.get('Name')] = parameter.get('Value') + jira_auth_token = output_parameters.get(JIRA_TOKEN) + jira_server_api = output_parameters.get(JIRA_API_SERVER) + jira_project = output_parameters.get(JIRA_PROJECT) + jira_conn = jira.JIRA(server=jira_server_api, token_auth=jira_auth_token) + mail_results = get_receive_mail_details(event) + for mail_result in mail_results: + action, ticket = mail_result.get('subject').split(';') + manager_mail = mail_result.get('from') + ticket_id = f'{jira_project}-{ticket}' + issue = jira_conn.issue(id=ticket_id) + jira_description = issue.fields.description + fields = {} + for field_value in jira_description.split('\n'): + if field_value: + if ':' in field_value: + key, value = field_value.strip().split(':', 1)
+ fields[key.strip()] = value.strip() + CRO_ADMINS.append(fields.get('ManagerApprovalAddress')) + if manager_mail in CRO_ADMINS: + jira_description += f'\nApprovedManager: {mail_result.get("from")}\n' + if action.upper() == APPROVED: + issue.update(description=jira_description, comment=f'From: {manager_mail}\nApproved\nPlease refer to your manager, in case of any issues') + jira_conn.transition_issue(issue=ticket_id, transition=REFINEMENT) + return { + 'statusCode': 204, + 'body': json.dumps(f'Approved the TicketId: {ticket}, by Manager: {manager_mail}') + } + else: + if action.upper() == REJECT: + jira_conn.transition_issue(issue=ticket_id, transition=CLOSED, comment=f'From: {manager_mail}\nRejected\nPlease refer to your manager, in case of any issues') + return { + 'statusCode': 204, + 'body': json.dumps(f'Rejected the TicketId: {ticket}, by Manager: {manager_mail}') + } + else: + issue.update(comment=f'From: {manager_mail}\n{manager_mail.split("@")[0]} is not authorized to perform this action') + return { + 'statusCode': 500, + 'body': json.dumps(f'{manager_mail} is not authorized to perform this action') + } + + else: + return { + 'statusCode': 400, + 'body': json.dumps('Jira parameters not found in the parameter store') + } + except Exception as err: + return { + 'statusCode': 500, + 'body': json.dumps(f'Something went wrong {err}') + } diff --git a/aws_lambda_functions/CloudResourceOrchestration/requirements.txt b/aws_lambda_functions/CloudResourceOrchestration/requirements.txt new file mode 100644 index 00000000..0172fc1f --- /dev/null +++ b/aws_lambda_functions/CloudResourceOrchestration/requirements.txt @@ -0,0 +1,2 @@ +boto3==1.26.1 +jira~=3.4.1 diff --git a/aws_lambda_functions/CloudResourceOrchestration/upload_to_lambda.sh b/aws_lambda_functions/CloudResourceOrchestration/upload_to_lambda.sh new file mode 100755 index 00000000..b636e154 --- /dev/null +++ b/aws_lambda_functions/CloudResourceOrchestration/upload_to_lambda.sh @@ -0,0 +1,25 @@ +PROJECT_NAME="CloudResourceOrchestration" +SUCCESS_OUTPUT_PATH="/dev/null" +AWS_DEFAULT_REGION="us-east-1" +echo "Clearing previously created zip file, if any" + +PROJECT_PATH="$PWD/$PROJECT_NAME.zip" + +if [ -f "$PROJECT_PATH" ]; then + rm -rf "$PROJECT_PATH" + rm -rf ./package + echo "Deleted previously created zip file" +fi + +pip install --target ./package -r requirements.txt > $SUCCESS_OUTPUT_PATH +pushd package +zip -r ../$PROJECT_NAME.zip . > $SUCCESS_OUTPUT_PATH +popd +zip -g $PROJECT_NAME.zip lambda_function.py > $SUCCESS_OUTPUT_PATH + +echo "#############################" + # Uploading to AWS Lambda +echo "Uploading to AWS Lambda in Region: $AWS_DEFAULT_REGION" +aws lambda update-function-code --function-name CloudResourceOrch --zip-file fileb://$PROJECT_PATH --region $AWS_DEFAULT_REGION > $SUCCESS_OUTPUT_PATH +echo "Uploaded to AWS Lambda" +echo "#############################"
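The handler above is keyed to SES inbound-mail events whose subject encodes `<action>;<ticket-number>`. A minimal local sketch of the parsing step, using a fabricated SES record: the addresses and ticket number are illustrative, and `lambda_function.py` is assumed importable from the working directory.

```python
# Exercise get_receive_mail_details with a hand-built SES receipt event.
# The record shape mirrors the 'aws:ses' structure the handler expects.
from lambda_function import get_receive_mail_details

sample_event = {
    "Records": [{
        "eventSource": "aws:ses",
        "ses": {
            "mail": {
                "source": "manager@example.com",          # illustrative sender
                "commonHeaders": {
                    "to": ["cro@example.com"],             # illustrative recipient
                    "subject": "APPROVED;123"              # '<action>;<ticket-number>'
                }
            }
        }
    }]
}

for mail in get_receive_mail_details(sample_event):
    action, ticket = mail["subject"].split(";")
    print(mail["from"], action.upper(), ticket)  # manager@example.com APPROVED 123
```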
diff --git a/cloud_governance/cloud_resource_orchestration/README.md b/cloud_governance/cloud_resource_orchestration/README.md new file mode 100644 index 00000000..7e9986cb --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/README.md @@ -0,0 +1,14 @@ +## Cloud Resource Orchestration + +This is the process to control costs on public clouds. \ +It requires the number of days a project will run and its estimated cost. \ +Details are collected from the front-end page [https://cloud-governance.rdu2.scalelab.redhat.com/](https://cloud-governance.rdu2.scalelab.redhat.com/). +After you submit the form, a mail is sent to your manager for approval. Once your request is approved, +tag your instances with TicketId: #ticket_number. \ +Then cloud_governance will start **cloud_resource_orchestration** and monitor your instances. + +To start the **cloud_resource_orchestration** CI, run the podman command below: + +```commandline +podman run --net="host" --rm --name cloud_resource_orchestration -e AWS_DEFAULT_REGION="ap-south-1" -e CLOUD_RESOURCE_ORCHESTRATION="True" -e account="$account" -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" -e PUBLIC_CLOUD_NAME="$PUBLIC_CLOUD_NAME" -e es_host="$ES_HOST" -e es_port="$ES_PORT" -e CRO_ES_INDEX="$CRO_ES_INDEX" -e log_level="INFO" -e LDAP_HOST_NAME="$LDAP_HOST_NAME" -e JIRA_QUEUE="$JIRA_QUEUE" -e JIRA_TOKEN="$JIRA_TOKEN" -e JIRA_USERNAME="$JIRA_USERNAME" -e JIRA_URL="$JIRA_URL" -e CRO_COST_OVER_USAGE="$CRO_COST_OVER_USAGE" -e CRO_PORTAL="$CRO_PORTAL" -e CRO_DEFAULT_ADMINS="$CRO_DEFAULT_ADMINS" -e CRO_REPLACED_USERNAMES="$CRO_REPLACED_USERNAMES" -e CRO_DURATION_DAYS="30" quay.io/ebattat/cloud-governance:latest +``` \ No newline at end of file diff --git a/cloud_governance/cloud_resource_orchestration/aws/long_run/ec2_long_run.py b/cloud_governance/cloud_resource_orchestration/aws/long_run/ec2_long_run.py deleted file mode 100644 index dd884e84..00000000 --- a/cloud_governance/cloud_resource_orchestration/aws/long_run/ec2_long_run.py +++ /dev/null @@ -1,137 +0,0 @@ - -import datetime - - -from cloud_governance.cloud_resource_orchestration.aws.long_run.monitor_long_run import MonitorLongRun -from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun -from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload -from cloud_governance.common.logger.init_logger import logger -from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp - -from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations -from cloud_governance.main.environment_variables import environment_variables - - -class EC2LongRun: - """ - This class tag & monitor the LongRun EC2 instances. - User Steps: - 1. Create a Jira Issue in Clouds portal, store the JiraId - 2. Create the EC2 instance, tag JiraId - CI Steps: - 1. CI Look the instances which are tagged with JiraId - 2. Checks the JiraId had manager approval in the data - 3.
If manger approval, append LongRun tags ( Project, LongRunDays, ApprovedManager ) - """ - - def __init__(self, region_name: str = ''): - self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') - self.__tag_long_run = TagLongRun(region_name=region_name) - self.__monitor_long_run = MonitorLongRun(region_name=region_name) - self.__ec2_operations = EC2Operations() - self.__es_upload = ElasticUpload() - self.__es_index = self.__environment_variables_dict.get('es_index') - self.__account = self.__environment_variables_dict.get('account') - self.__jira_queue = self.__environment_variables_dict.get('JIRA_QUEUE') - - def update_es_data(self, cost_estimation: float, instances: list, jira_id: str): - """This method update the es_data""" - es_data = self.__es_upload.elastic_search_operations.get_es_data_by_id(id=jira_id, index=self.__es_index) - source = es_data.get('_source') - instance_ids = source.get('instance_ids', []) - update_es_data = {'cost_estimation': cost_estimation, - 'long_run_days': int(instances[0].get('long_run_days')), - 'total_instances': len(instances)} - running_days = 0 - total_price = 0 - for instance in instances: - running_days = max(running_days, instance.get('instance_running_days')) - total_price += instance.get('total_run_price') - instance_id = instance.get('instance_id') - instance_ids.append(instance_id) - source_instance_id = source.get(instance_id) - update_es_data[instance_id] = \ - {'total_run_price': round(source_instance_id.get('total_run_price') + instance.get('total_run_price'), - 3)} - update_es_data[instance_id].update({'total_run_hours': round( - source_instance_id.get('total_run_hours') + instance.get('total_run_hours'), 3)}) - update_es_data[instance_id].update({'last_saved_time': instance.get('last_saved_time')}) - update_es_data[instance_id].update({'instance_state': instance.get('instance_state')}) - update_es_data[instance_id].update({'instance_running_days': instance.get('instance_running_days')}) - update_es_data[instance_id].update( - {'ebs_cost': round(float(instance.get('ebs_cost') + source_instance_id.get('ebs_cost')), 3)}) - update_es_data['running_days'] = running_days - update_es_data['total_run_price'] = float(source.get('total_run_price')) + total_price - update_es_data['remaining_days'] = int(update_es_data.get('long_run_days')) - running_days - update_es_data['timestamp'] = datetime.datetime.utcnow() - update_es_data['instance_ids'] = list(set(instance_ids)) - self.__es_upload.elastic_search_operations.update_elasticsearch_index(index=self.__es_index, id=jira_id, - metadata=update_es_data) - logger.info(f'Updated the jira-id: {jira_id} data : {update_es_data}') - - def upload_new_es_data(self, jira_id: str, instances: list, cost_estimation: float): - """This method upload the new es_data""" - es_data = {'jira_id': jira_id, 'cloud_name': 'aws', 'account_name': self.__account, - 'user': instances[0].get('user'), 'long_run_days': instances[0].get('long_run_days'), - 'owner': instances[0].get('owner'), 'approved_manager': instances[0].get('approved_manager'), - 'user_manager': instances[0].get('manager'), 'region_name': self.__region_name, - 'project': instances[0].get('project'), 'cost_estimation': cost_estimation, - 'jira_id_state': 'in-progress', - 'total_instances': len(instances)} - running_days = 0 - total_price = 0 - for instance in instances: - running_days = max(running_days, 
instance.get('instance_running_days')) - total_price += instance.get('total_run_price') - es_data.setdefault('instance_ids', []).append(instance.get('instance_id')) - instance.pop('user') - instance.pop('long_run_days') - instance.pop('owner') - instance.pop('approved_manager') - instance.pop('project') - instance.pop('manager') - es_data[instance.get('instance_id')] = instance - es_data['running_days'] = running_days - es_data['remaining_days'] = int(es_data.get('long_run_days')) - running_days - es_data['total_run_price'] = round(total_price, 3) - es_data['timestamp'] = datetime.datetime.utcnow() - self.__es_upload.es_upload_data(items=[es_data], es_index=self.__es_index, set_index='jira_id') - logger.info(f'Uploaded data to the es index {self.__es_index}') - - @logger_time_stamp - def prepare_to_upload_es(self, upload_data: dict): - """ - This method beautify and upload data to ES - """ - for jira_id, instances in upload_data.items(): - issue_description = self.__tag_long_run.jira_operations.get_issue_description(jira_id=jira_id, state='any') - cost_estimation = float(issue_description.get('CostEstimation', 0)) - cost_estimation += float( - self.__tag_long_run.jira_operations.get_issue_sub_tasks_cost_estimation(jira_id=jira_id)) - if self.__jira_queue not in jira_id: - jira_id = f"{self.__jira_queue}-{jira_id}" - if self.__es_upload.elastic_search_operations.verify_elastic_index_doc_id(index=self.__es_index, - doc_id=jira_id): - self.update_es_data(cost_estimation=cost_estimation, instances=instances, jira_id=jira_id) - else: - self.upload_new_es_data(cost_estimation=cost_estimation, instances=instances, jira_id=jira_id) - - def __long_run(self): - """ - This method start the long run process - 1. tag the instances which have tag JiraId - 2. Monitor the long_run instances based on LongRunDays - """ - tag_response = self.__tag_long_run.run() - if tag_response: - logger.info(f'Tags are added to the JiraId tag instances: {tag_response}') - monitor_response = self.__monitor_long_run.run() - if monitor_response: - self.prepare_to_upload_es(monitor_response) - - def run(self): - """ - This method run the long run methods - """ - self.__long_run() diff --git a/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_in_progress_issues.py b/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_in_progress_issues.py deleted file mode 100644 index 7e6f3996..00000000 --- a/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_in_progress_issues.py +++ /dev/null @@ -1,90 +0,0 @@ -from datetime import datetime - -from cloud_governance.cloud_resource_orchestration.common.ec2_monitor_operations import EC2MonitorOperations -from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations -from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload -from cloud_governance.common.jira.jira_operations import JiraOperations -from cloud_governance.main.environment_variables import environment_variables - - -class MonitorInProgressIssues: - """ - This class monitor the in-progress jira instances - If the instances are terminated then it closes the JiraTicket - """ - - def __init__(self, region_name: str = ''): - self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') - self.__ec2_operations = EC2Operations(region=self.__region_name) - self.jira_operations = JiraOperations() - self.__es_upload = 
ElasticUpload() - self.__es_index = self.__environment_variables_dict.get('es_index') - self.__ec2_monitor_operations = EC2MonitorOperations(region_name=self.__region_name) - self.__jira_queue = self.__environment_variables_dict.get('JIRA_QUEUE') - self.__trails_snapshot_time = self.__environment_variables_dict.get('TRAILS_SNAPSHOT_TIME') - - def __update_data_and_close_ticket(self, jira_id: str): - """ - This method update data in the es and close the ticket - """ - update_data = {'jira_id_state': 'Closed', 'instance_state': 'terminated', 'timestamp': datetime.utcnow()} - self.__es_upload.elastic_search_operations.update_elasticsearch_index(index=self.__es_index, metadata=update_data, id=jira_id) - self.jira_operations.move_issue_state(jira_id=jira_id, state='closed') - - def monitor_progress_issues(self): - """ - This method monitor the in-progress issues, and closed the issue if the instance is terminated - """ - jira_ids = self.jira_operations.get_all_issues_in_progress() - es_jira_ids = [] - for jira_id, region in jira_ids.items(): - if region == self.__region_name: - if self.__es_upload.elastic_search_operations.verify_elastic_index_doc_id(index=self.__es_index, doc_id=jira_id): - es_jira_ids.append(jira_id) - long_run_jira_ids = [] - long_run_instances = self.__ec2_monitor_operations.get_instances_by_filtering(tag_key_name='JiraId') - for instance in long_run_instances: - for resource in instance['Instances']: - jira_id = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='JiraId') - if self.__jira_queue not in jira_id: - jira_id = f'{self.__jira_queue}-{jira_id}' - long_run_jira_ids.append(jira_id) - terminated_jira_ids = set(es_jira_ids) - set(long_run_jira_ids) - for jira_id in terminated_jira_ids: - es_data = self.__es_upload.elastic_search_operations.get_es_data_by_id(id=jira_id, index=self.__es_index) - source = es_data.get('_source') - instance_ids = source.get('instance_ids') - total_price = 0 - terminated = 0 - running_days = 0 - for instance_id in instance_ids: - if source[instance_id].get('instance_state') != 'terminated': - last_saved_time = datetime.strptime(source[instance_id].get('last_saved_time'), "%Y-%m-%dT%H:%M:%S%z") - launch_time = datetime.strptime(source[instance_id].get('instance_create_time'), "%Y-%m-%dT%H:%M:%S%z") - create_datetime = datetime.strptime(source[instance_id].get('instance_create_time'), "%Y-%m-%dT%H:%M:%S%z") - trails = self.__ec2_monitor_operations.get_instance_logs(instance_id, last_saved_time=last_saved_time) - running_days = max(running_days, self.__ec2_monitor_operations.calculate_days(create_datetime)) - run_hours = self.__ec2_monitor_operations.get_run_hours_from_trails(last_saved_time=last_saved_time, trails=trails, - launch_time=launch_time, - last_instance_state=source[instance_id].get('instance_state'), create_datetime=create_datetime, present_state='terminated') - price = self.__ec2_monitor_operations.get_instance_hours_price(instance_type=source[instance_id].get('instance_type'), run_hours=run_hours) - source[instance_id]['total_run_price'] = round(float(source[instance_id]['total_run_price']) + price, 3) - source[instance_id]['total_run_hours'] = round(run_hours + float(source[instance_id]['total_run_hours']), 3) - source[instance_id]['instance_state'] = 'terminated' - source[instance_id]['last_saved_time'] = self.__trails_snapshot_time - source[instance_id]['instance_running_days'] = self.__ec2_monitor_operations.calculate_days(create_datetime) - total_price += price - terminated += 1 - 
source['total_run_price'] += total_price - source['timestamp'] = datetime.utcnow() - if running_days != 0: - source['running_days'] = running_days - source['remaining_days'] = source['long_run_days'] - running_days - - if source.get('remaining_days') == source.get('long_run_days') or terminated == len(instance_ids): - self.__update_data_and_close_ticket(jira_id=jira_id) - source['jira_id_state'] = 'closed' - self.__es_upload.elastic_search_operations.update_elasticsearch_index(index=self.__es_index, - metadata=source, id=jira_id) - return terminated_jira_ids diff --git a/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_long_run.py b/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_long_run.py deleted file mode 100644 index b0f10e07..00000000 --- a/cloud_governance/cloud_resource_orchestration/aws/long_run/monitor_long_run.py +++ /dev/null @@ -1,117 +0,0 @@ - -from cloud_governance.cloud_resource_orchestration.aws.long_run.monitor_in_progress_issues import MonitorInProgressIssues -from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun -from cloud_governance.cloud_resource_orchestration.common.ec2_monitor_operations import EC2MonitorOperations -from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload -from cloud_governance.common.jira.jira import logger -from cloud_governance.common.jira.jira_operations import JiraOperations -from cloud_governance.common.ldap.ldap_search import LdapSearch -from cloud_governance.common.mails.mail_message import MailMessage -from cloud_governance.common.mails.postfix import Postfix -from cloud_governance.main.environment_variables import environment_variables - -from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations - - -class MonitorLongRun: - """This class monitors the long run instances and returns the data""" - - FIRST_ALERT: int = 5 - SECOND_ALERT: int = 3 - DEFAULT_ADMINS = ['athiruma@redhat.com', 'ebattat@redhat.com', 'natashba@redhat.com'] - HOURS_IN_SECONDS = 3600 - JIRA_ID = 'JiraId' - - def __init__(self, region_name: str = ''): - self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') - self.__ec2_operations = EC2Operations(region=self.__region_name) - self.__ldap_search = LdapSearch(ldap_host_name=self.__environment_variables_dict.get('LDAP_HOST_NAME')) - self.__tag_long_run = TagLongRun(region_name=self.__region_name) - self.jira_operations = JiraOperations() - self.__es_upload = ElasticUpload() - self.__es_index = self.__environment_variables_dict.get('es_index') - self.__mail_message = MailMessage() - self.__postfix = Postfix() - self.__ec2_monitor_operations = EC2MonitorOperations(region_name=self.__region_name) - self.monitor_in_progress = MonitorInProgressIssues(region_name=self.__region_name) - self.__jira_queue = self.__environment_variables_dict.get('JIRA_QUEUE') - - def __alert_instance_user(self, issues_data: dict): - """ - This method alert the instance user, if the LongRunDays are running out - """ - for jira_id, instances in issues_data.items(): - if self.__jira_queue not in jira_id: - jira_id = f'{self.__jira_queue}-{jira_id}' - long_run_days = int(instances[0].get('long_run_days')) - approved_manager = instances[0].get('approved_manager') - user = instances[0].get('user') - running_days = 0 - for instance in instances: - running_days = max(running_days, 
instance.get('instance_running_days')) - cc = self.DEFAULT_ADMINS - if approved_manager: - cc.append(approved_manager) - user_details = self.__ldap_search.get_user_details(user_name=user) - if user_details: - cc.append(f'{user_details.get("managerId")}@redhat.com') - if running_days >= long_run_days - self.FIRST_ALERT: - sub_tasks = self.jira_operations.get_jira_id_sub_tasks(jira_id=jira_id) - if sub_tasks: - self.__tag_long_run.tag_extend_instances(sub_tasks=sub_tasks, jira_id=jira_id) - subject, body = '', '' - if running_days == long_run_days - self.FIRST_ALERT: - subject, body = self.__mail_message.get_long_run_alert(user=user, days=self.FIRST_ALERT, jira_id=jira_id) - elif running_days == long_run_days - self.SECOND_ALERT: - subject, body = self.__mail_message.get_long_run_alert(user=user, days=self.FIRST_ALERT, jira_id=jira_id) - else: - if running_days >= long_run_days: - subject, body = self.__mail_message.get_long_run_expire_alert(user=user, jira_id=jira_id) - if subject and body: - self.__postfix.send_email_postfix(subject=subject, to=user, cc=cc, content=body, mime_type='html') - - def monitor_instances(self): - """ - This method monitoring the LongRun instances which have tag LongRunDays - """ - jira_id_alerts = {} - long_run_instances = self.__ec2_monitor_operations.get_instances_by_filtering(tag_key_name='LongRunDays') - for instance in long_run_instances: - for resource in instance['Instances']: - instance_id, instance_type, tags, launch_datetime, instance_state = resource.get('InstanceId'), resource.get('InstanceType'), resource.get('Tags'), resource.get('LaunchTime'), resource.get('State')['Name'] - jira_id = self.__ec2_operations.get_tag_value_from_tags(tag_name=self.JIRA_ID, tags=tags) - run_hours, last_saved_time = self.__ec2_monitor_operations.get_instance_run_hours(instance=resource, jira_id=jira_id) - price = self.__ec2_monitor_operations.get_instance_hours_price(instance_type=instance_type, run_hours=run_hours) - create_time = self.__ec2_monitor_operations.get_attached_time(volume_list=resource.get('BlockDeviceMappings')) - ebs_cost = self.__ec2_monitor_operations.get_volumes_cost(resource.get('BlockDeviceMappings')) - running_days = self.__ec2_monitor_operations.calculate_days(launch_date=launch_datetime) - jira_id_alerts.setdefault(jira_id, []).append({ - 'instance_id': instance_id, - 'total_run_hours': run_hours, - 'total_run_price': price, - 'instance_create_time': create_time, - 'instance_state': instance_state, - 'instance_type': instance_type, - 'last_saved_time': last_saved_time, - 'jira_id': jira_id, - 'user': self.__ec2_operations.get_tag_value_from_tags(tag_name='User', tags=tags), - 'manager': self.__ec2_operations.get_tag_value_from_tags(tag_name='Manager', tags=tags), - 'approved_manager': self.__ec2_operations.get_tag_value_from_tags(tag_name='ApprovedManager', tags=tags), - 'long_run_days': self.__ec2_operations.get_tag_value_from_tags(tag_name='LongRunDays', tags=tags), - 'instance_running_days': running_days, - 'owner': self.__ec2_operations.get_tag_value_from_tags(tag_name='Owner', tags=tags), - 'project': self.__ec2_operations.get_tag_value_from_tags(tag_name='Project', tags=tags), - 'instance_name': self.__ec2_operations.get_tag_value_from_tags(tag_name='Name', tags=tags), - 'ebs_cost': ebs_cost - }) - self.__alert_instance_user(issues_data=jira_id_alerts) - return jira_id_alerts - - def run(self): - """ - This method run the long run monitoring methods - """ - response = self.monitor_in_progress.monitor_progress_issues() - logger.info(f"Closed 
JiraId's: {response}") - return self.monitor_instances() diff --git a/cloud_governance/cloud_resource_orchestration/aws/long_run/tag_long_run.py b/cloud_governance/cloud_resource_orchestration/aws/long_run/tag_long_run.py deleted file mode 100644 index 87cb6a80..00000000 --- a/cloud_governance/cloud_resource_orchestration/aws/long_run/tag_long_run.py +++ /dev/null @@ -1,126 +0,0 @@ -import datetime - -import boto3 - -from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload -from cloud_governance.common.logger.init_logger import logger -from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp - -from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations -from cloud_governance.common.jira.jira_operations import JiraOperations -from cloud_governance.main.environment_variables import environment_variables - - -class TagLongRun: - """ - This class tag the long run days and tag extend the long run days - """ - - DEFAULT_SEARCH_TAG = 'JiraId' - KEY = 'Key' - VALUE = 'Value' - - def __init__(self, region_name: str = ''): - self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') - self.__ec2_client = boto3.client('ec2', region_name=self.__region_name) - self.__ec2_operations = EC2Operations(region=self.__region_name) - self.jira_operations = JiraOperations() - self.__es_upload = ElasticUpload() - self.__jira_queue = self.__environment_variables_dict.get('JIRA_QUEUE') - - @logger_time_stamp - def tag_extend_instances(self, sub_tasks: list, jira_id: str): - """This method extend the longrun days if the user opened the jira ticket""" - filters = {'Filters': [{'Name': 'tag:JiraId', 'Values': [jira_id]}]} - extend_long_run_days = 0 - for task_id in sub_tasks: - description = self.jira_operations.get_issue_description(jira_id=task_id, sub_task=True) - extend_long_run_days += int(description.get('Days')) - instances = self.__ec2_operations.get_instances(**filters) - long_run_days = 0 - instance_ids = [] - for instance in instances: - for resource in instance['Instances']: - tags = resource.get('Tags') - if tags: - long_run_days = int(self.__ec2_operations.get_tag_value_from_tags(tag_name='LongRunDays', tags=tags)) - instance_ids.append(resource.get('InstanceId')) - if long_run_days > 0: - long_run_days += extend_long_run_days - tag = [{'Key': 'LongRunDays', 'Value': str(long_run_days)}] - self.__ec2_client.create_tags(Resources=instance_ids, Tags=tag) - data = { - 'long_run_days': long_run_days, - 'timestamp': datetime.datetime.utcnow() - } - if self.__es_upload.elastic_search_operations: - self.__es_upload.elastic_search_operations.update_elasticsearch_index(metadata=data, id=jira_id, index=self.__es_upload.es_index) - for task_id in sub_tasks: - self.jira_operations.move_issue_state(jira_id=task_id, state='closed') - - @logger_time_stamp - def tag_jira_id_attach_instance(self, jira_id: str, instance_id: str, volume_ids: list): - """ - This method tag the long run instance with tags - """ - jira_description = self.jira_operations.get_issue_description(jira_id=jira_id, state=self.jira_operations.JIRA_TRANSITION_IDS.get('INPROGRESS')) - if jira_description: - if self.__jira_queue not in jira_id: - jira_id = f'{self.__jira_queue}-{jira_id}' - long_run_days = jira_description.get('Days') - manager_approved = jira_description.get('ApprovedManager') - if not manager_approved: - manager_approved = 
jira_description.get('ManagerApprovalAddress') - user_email = jira_description.get('EmailAddress') - user = user_email.split('@')[0] - project = jira_description.get('Project') - tags = [{self.KEY: 'LongRunDays', self.VALUE: long_run_days}, - {self.KEY: 'ApprovedManager', self.VALUE: manager_approved}, - {self.KEY: 'Project', self.VALUE: project.upper()}, - {self.KEY: 'Email', self.VALUE: user_email}, - {self.KEY: self.DEFAULT_SEARCH_TAG, self.VALUE: jira_id}, - {self.KEY: 'User', self.VALUE: user}] - self.__ec2_client.create_tags(Resources=[instance_id], Tags=tags) - self.jira_operations.move_issue_state(jira_id=jira_id, state='inprogress') - logger.info(f'Extra tags are added to the instances: {instance_id}, had an jira_id: {jira_id}') - if volume_ids: - self.__ec2_client.create_tags(Resources=volume_ids, Tags=tags) - logger.info(f'Tagged the instance: {instance_id} attached volumes {volume_ids}') - return True - return False - - def __get_instance_volumes(self, block_device_mappings: list): - """This method returns the instance volumes""" - volumes_list = [] - for mapping in block_device_mappings: - if mapping.get('Ebs').get('VolumeId'): - volumes_list.append(mapping.get('Ebs').get('VolumeId')) - return volumes_list - - @logger_time_stamp - def __find_tag_instances(self): - """ - This method list the instances and tagged the instances which have the tag IssueId - """ - instances = self.__ec2_operations.get_instances() - jira_id_instances = {} - for instance in instances: - for resource in instance['Instances']: - instance_id = resource.get('InstanceId') - jira_id = '' - if resource.get('Tags'): - jira_id = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name=self.DEFAULT_SEARCH_TAG) - if jira_id: - long_run_days = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='LongRunDays') - if not long_run_days: - volume_ids = self.__get_instance_volumes(resource.get('BlockDeviceMappings')) - if self.tag_jira_id_attach_instance(jira_id=jira_id, instance_id=instance_id, volume_ids=volume_ids): - jira_id_instances.setdefault(jira_id, []).append(instance_id) - return jira_id_instances - - def run(self): - """ - This method run the tagging of long run - """ - return self.__find_tag_instances() diff --git a/cloud_governance/cloud_resource_orchestration/aws/short_run/ec2_short_run.py b/cloud_governance/cloud_resource_orchestration/aws/short_run/ec2_short_run.py deleted file mode 100644 index 246b5d5b..00000000 --- a/cloud_governance/cloud_resource_orchestration/aws/short_run/ec2_short_run.py +++ /dev/null @@ -1,7 +0,0 @@ - - -# @Todo TBD, after the long_run -class EC2ShortRun: - - def run(self): - pass diff --git a/cloud_governance/cloud_resource_orchestration/aws/__init__.py b/cloud_governance/cloud_resource_orchestration/clouds/__init__.py similarity index 100% rename from cloud_governance/cloud_resource_orchestration/aws/__init__.py rename to cloud_governance/cloud_resource_orchestration/clouds/__init__.py diff --git a/cloud_governance/cloud_resource_orchestration/aws/long_run/__init__.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/__init__.py similarity index 100% rename from cloud_governance/cloud_resource_orchestration/aws/long_run/__init__.py rename to cloud_governance/cloud_resource_orchestration/clouds/aws/__init__.py diff --git a/cloud_governance/cloud_resource_orchestration/aws/short_run/__init__.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/__init__.py similarity index 100% rename from 
cloud_governance/cloud_resource_orchestration/aws/short_run/__init__.py rename to cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/__init__.py diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py new file mode 100644 index 00000000..ab454752 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py @@ -0,0 +1,219 @@ +import json +import logging +import os +import tempfile +from datetime import datetime + +import boto3 +import typeguard +from jinja2 import Environment, FileSystemLoader + +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.aws_tagging_operations import AWSTaggingOperations +from cloud_governance.cloud_resource_orchestration.common.abstract_monitor_tickets import AbstractMonitorTickets +from cloud_governance.cloud_resource_orchestration.utils.common_operations import get_tag_value_by_name +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations +from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations +from cloud_governance.common.jira.jira_operations import JiraOperations +from cloud_governance.common.ldap.ldap_search import LdapSearch +from cloud_governance.common.logger.init_logger import handler, logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.common.mails.mail_message import MailMessage +from cloud_governance.common.mails.postfix import Postfix +from cloud_governance.main.environment_variables import environment_variables + + +class AWSMonitorTickets(AbstractMonitorTickets): + """This class monitors the Jira tickets""" + + NEW = 'New' + REFINEMENT = 'Refinement' + CLOSED = 'Closed' + IN_PROGRESS = 'In Progress' + CLOSE_JIRA_TICKET = 0 + FIRST_CRO_ALERT: int = 5 + SECOND_CRO_ALERT: int = 3 + DEFAULT_ROUND_DIGITS: int = 3 + + def __init__(self, region_name: str = ''): + super().__init__() + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__cro_resource_tag_name = self.__environment_variables_dict.get('CRO_RESOURCE_TAG_NAME') + self.__jira_operations = JiraOperations() + self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') + self.es_cro_index = self.__environment_variables_dict.get('CRO_ES_INDEX', '') + self.__default_admins = self.__environment_variables_dict.get('CRO_DEFAULT_ADMINS', []) + self.__cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME', '') + self.__account = self.__environment_variables_dict.get('account', '') + self.__es_host = self.__environment_variables_dict.get('es_host', '') + self.__es_port = self.__environment_variables_dict.get('es_port', '') + self.__es_operations = ElasticSearchOperations(es_host=self.__es_host, es_port=self.__es_port) + self.__manager_escalation_days = self.__environment_variables_dict.get('MANAGER_ESCALATION_DAYS') + self.__ldap_search = LdapSearch(self.__environment_variables_dict.get('LDAP_HOST_NAME')) + self.__global_admin_name = self.__environment_variables_dict.get('GLOBAL_CLOUD_ADMIN') + self.__mail_message = MailMessage() + self.__postfix = Postfix() + self.__ec2_operations = EC2Operations() + + @typeguard.typechecked + @logger_time_stamp + def get_tickets(self, ticket_status: str): + """ + This method returns the tickets based on status + :param ticket_status: + :return: + """
+ return self.__jira_operations.get_all_issues(ticket_status=ticket_status) + + @typeguard.typechecked + @logger_time_stamp + def __send_ticket_status_alerts(self, tickets: dict, ticket_status: str): + """ + This method sends an alert to the user on the ticket status when it is in the New or Refinement state + Ticket States: + New - Needs approval + Refinement - The user didn't add the tag to the resources + :param tickets: + :param ticket_status: + :return: + """ + if ticket_status in (self.NEW, self.REFINEMENT): + user_tickets = {} + for ticket_id, description in tickets.items(): + ticket_id = ticket_id.split('-')[-1] + if self.__account in description.get('AccountName'): + if not self.__es_operations.verify_elastic_index_doc_id(index=self.es_cro_index, doc_id=ticket_id): + if ticket_status == self.REFINEMENT: + ticket_status = 'manager-approved' + source = {'cloud_name': description.get('CloudName'), 'account_name': description.get('AccountName').replace('OPENSHIFT-', ''), + 'region_name': description.get('Region'), 'user': '', + 'user_cro': description.get('EmailAddress').split('@')[0], 'user_cost': 0, 'ticket_id': ticket_id, 'ticket_id_state': ticket_status.lower(), + 'estimated_cost': description.get('CostEstimation'), 'instances_count': 0, 'monitored_days': 0, + 'ticket_opened_date': description.get('TicketOpenedDate').date(), 'duration': description.get('Days'), 'approved_manager': '', + 'user_manager': '', 'project': description.get('Project'), 'owner': f'{description.get("FirstName")} {description.get("LastName")}'.upper(), 'total_spots': 0, + 'total_ondemand': 0, 'AllocatedBudget': [], 'instances_list': [], 'instance_types_list': []} + self.__es_operations.upload_to_elasticsearch(index=self.es_cro_index, data=source, id=ticket_id) + current_date = datetime.now().date() + ticket_opened_date = description.get('TicketOpenedDate').date() + if ticket_opened_date != current_date: + user = description.get('EmailAddress').split('@')[0] + manager = description.get('ManagerApprovalAddress').split('@')[0] + cc = self.__default_admins + subject = body = to = None + ticket_opened_days = (current_date - ticket_opened_date).days + if ticket_status == self.NEW: # alert the manager if no action was taken + to = manager + extra_message = '' + if self.__manager_escalation_days <= ticket_opened_days <= self.__manager_escalation_days + 2: + manager_of_manager = self.__ldap_search.get_user_details(user_name=manager) + to = manager_of_manager.get('ManagerId', '') + extra_message = f"Your associate/Manager: [{manager}] hasn't approved this request.\nThe user {user} has been waiting for approval for {ticket_opened_days} days.\n" \ + f"Please review the below details and approve/reject" + elif ticket_opened_days >= self.__manager_escalation_days + 2: + to = self.__global_admin_name + extra_message = f"Missing manager approval.\nThe user {user} has been waiting for approval for {ticket_opened_days} days.\nPlease review the below details and approve/reject" + subject, body = self.__mail_message.cro_request_for_manager_approval(manager=to, request_user=user, cloud_name=self.__cloud_name, ticket_id=ticket_id, description=description, extra_message=extra_message) + else: # alert the user if the tag wasn't added + user_tickets.setdefault(user, []).append(f"{ticket_id} : {description.get('Project')}") + if user_tickets: + for user, ticket_ids in user_tickets.items(): + active_instances = self.__ec2_operations.get_active_instances(ignore_tag='TicketId', tag_value=user, tag_name='User') + if active_instances: + for region, instances_list in active_instances.items(): + active_instances_ids = {region: [instance.get('InstanceId') for instance in instances_list]} + to = user + cc = self.__default_admins + subject, body = self.__mail_message.cro_send_user_alert_to_add_tags(user=user, ticket_ids=ticket_ids) + with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as filename: + filename.write(json.dumps(active_instances_ids)) + filename.flush() + self.__postfix.send_email_postfix(to=to, cc=cc, subject=subject, content=body, mime_type='html', filename=filename.name) + + @typeguard.typechecked + @logger_time_stamp + def verify_es_instances_state(self, es_data: dict): + """ + This method verifies the state of the es_instances + :param es_data: + :return: + """ + instance_ids = [resource.split(',')[1].strip() for resource in es_data.get('instances', []) if 'terminated' not in resource] + es_data_change = False + if instance_ids: + local_ec2_operations = EC2Operations(region=self.__region_name) + instances = local_ec2_operations.get_ec2_instance_ids(Filters=[{'Name': 'instance-id', 'Values': instance_ids}]) + instance_ids = list(set(instance_ids) - set(instances)) + for idx, resource in enumerate(es_data.get('instances')): + resource_data = resource.split(',') + instance_id = resource_data[1].strip() + if instance_id in instance_ids: + es_data_change = True + resource_data[4] = 'terminated' + es_data['instances'][idx] = ', '.join(resource_data) + return es_data_change + + @logger_time_stamp + def __track_tickets(self): + """ + This method tracks the user tickets + :return: + """ + self.__send_ticket_status_alerts(ticket_status=self.NEW, tickets=self.get_tickets(ticket_status=self.NEW)) + self.__send_ticket_status_alerts(ticket_status=self.REFINEMENT, tickets=self.get_tickets(ticket_status=self.REFINEMENT)) + + def update_budget_tag_to_resources(self, region_name: str, ticket_id: str, updated_budget: int): + """ + This method updates the budget tag on the AWS resources which have the tag TicketId: # + :param region_name: + :param ticket_id: + :param updated_budget: + :return: + """ + tag_to_be_updated = 'EstimatedCost' + tagging_operations = AWSTaggingOperations(region_name=region_name) + resources_list_to_update = tagging_operations.get_resources_list(tag_name='TicketId', tag_value=ticket_id) + if resources_list_to_update: + resource_arn_list = [] + previous_cost = 0 + for resource in resources_list_to_update: + resource_arn_list.append(resource.get('ResourceARN')) + if previous_cost == 0: + previous_cost = get_tag_value_by_name(tags=resource.get('Tags'), tag_name=tag_to_be_updated) + updated_budget += int(float(previous_cost)) + update_tags_dict = {tag_to_be_updated: str(updated_budget)} + tagging_operations.tag_resources_list(resources_list=resource_arn_list, + update_tags_dict=update_tags_dict) + else: + logger.info('No AWS resources to update the costs')
+ + def update_duration_tag_to_resources(self, region_name: str, ticket_id: str, updated_duration: int): + """ + This method updates the duration tag on cloud resources + :param region_name: + :param ticket_id: + :param updated_duration: + :return: + """ + tag_to_be_updated = 'Duration' + tagging_operations = AWSTaggingOperations(region_name=region_name) + resources_list_to_update = tagging_operations.get_resources_list(tag_name='TicketId', tag_value=ticket_id) + if resources_list_to_update: + resource_arn_list = [] + previous_duration = 0 + for resource in resources_list_to_update: + resource_arn_list.append(resource.get('ResourceARN')) + if previous_duration == 0: + previous_duration = get_tag_value_by_name(tags=resource.get('Tags'), tag_name=tag_to_be_updated) + updated_duration += int(float(previous_duration)) + update_tags_dict = {tag_to_be_updated: str(updated_duration)} + tagging_operations.tag_resources_list(resources_list=resource_arn_list, update_tags_dict=update_tags_dict) + else: + logger.info('No AWS resources to update the duration') + + @logger_time_stamp + def run(self): + """ + This method runs all the Jira ticket monitoring methods + :return: + """ + self.__track_tickets() + self.monitor_tickets() diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_tagging_operations.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_tagging_operations.py new file mode 100644 index 00000000..f13368bb --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_tagging_operations.py @@ -0,0 +1,43 @@ +from abc import ABC + +import typeguard + +from cloud_governance.cloud_resource_orchestration.clouds.common.abstract_tagging_operations import \ + AbstractTaggingOperations +from cloud_governance.common.clouds.aws.resource_tagging_api.resource_tag_api_operations import ResourceTagAPIOperations +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp + + +class AWSTaggingOperations(AbstractTaggingOperations): + """ + This class performs the tagging operations on AWS + """ + + def __init__(self, region_name: str): + super().__init__() + self.__resource_tag_api_operations = ResourceTagAPIOperations(region_name=region_name) + + @logger_time_stamp + def tag_resources_list(self, resources_list: list, update_tags_dict: dict): + """ + This method updates the tags to the resources + :param resources_list: + :param update_tags_dict: + :return: + """ + self.__resource_tag_api_operations.tag_resources(resource_arn_list=resources_list, + update_tags_dict=update_tags_dict) + + @typeguard.typechecked + @logger_time_stamp + def get_resources_list(self, tag_name: str, tag_value: str = ''): + """ + This method returns all the resources having the tag_name and tag_value + :param tag_name: + :param tag_value: + :return: + """ + resources_list = self.__resource_tag_api_operations.get_resources(tag_name=tag_name, tag_value=tag_value) + if resources_list: + return resources_list + return []
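`AWSTaggingOperations` delegates to an internal `ResourceTagAPIOperations` wrapper (not shown in this diff). A minimal sketch of the same read-modify-write pattern, expressed directly against boto3's Resource Groups Tagging API; the region, ticket id, and added budget of 100 are illustrative, and `tag_resources` accepts at most 20 ARNs per call:

```python
# Read the resources carrying a TicketId tag, sum the existing EstimatedCost,
# and write the updated value back, mirroring update_budget_tag_to_resources.
import boto3

client = boto3.client("resourcegroupstaggingapi", region_name="us-east-1")

# 1. Find every resource tagged TicketId=123.
mappings = client.get_resources(
    TagFilters=[{"Key": "TicketId", "Values": ["123"]}]
)["ResourceTagMappingList"]

arns, previous_cost = [], 0
for mapping in mappings:
    arns.append(mapping["ResourceARN"])
    for tag in mapping["Tags"]:
        if tag["Key"] == "EstimatedCost" and previous_cost == 0:
            previous_cost = int(float(tag["Value"]))

# 2. Write back the summed budget in one batch (max 20 ARNs per request).
if arns:
    client.tag_resources(ResourceARNList=arns[:20],
                         Tags={"EstimatedCost": str(previous_cost + 100)})
```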
diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py new file mode 100644 index 00000000..d8d9092b --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py @@ -0,0 +1,323 @@ +import logging +from datetime import datetime, timedelta + +import typeguard + +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.cost_over_usage import CostOverUsage +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.aws_monitor_tickets import AWSMonitorTickets +from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations +from cloud_governance.common.jira.jira_operations import JiraOperations +from cloud_governance.common.logger.init_logger import handler +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + + +class CollectCROReports: + """ + This class collects the user/instance-id data from the cost-explorer + """ + + DEFAULT_ROUND_DIGITS = 3 + ZERO = 0 + TICKET_ID_KEY = 'ticket_id' + COST_EXPLORER_TAGS = {TICKET_ID_KEY: 'TicketId'} + AND = 'And' + ALLOCATED_BUDGET = 'AllocatedBudget' + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__account_name = self.__environment_variables_dict.get('account', '').replace('OPENSHIFT-', '').strip() + self.__cost_over_usage = CostOverUsage() + self.jira_operations = JiraOperations() + self.__public_cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME', '') + self.__es_index_cro = self.__environment_variables_dict.get('CRO_ES_INDEX', '') + self.__account_id = IAMOperations().get_aws_account_id_name() + self.__ce_payer_index = self.__environment_variables_dict.get('CE_PAYER_INDEX') + + def get_account_budget_from_payer_ce_report(self): + """ + This method returns the account budget from the payer ce reports + See the policy cost_explorer_payer_billings + :return: + """ + query = { + "query": { + "bool": { + "must": [ + {"term": {"CloudName.keyword": self.__public_cloud_name}}, + {"term": {"AccountId.keyword": self.__account_id}}, + {"term": {"Month": str(datetime.utcnow().year)}}, + ] + } + }, + "size": 1 + } + response = self.__cost_over_usage.es_operations.fetch_data_by_es_query(query=query, es_index=self.__ce_payer_index, search_size=1, limit_to_size=True) + if response: + return response[0].get('_source').get(self.ALLOCATED_BUDGET) + return 0 + + def get_total_account_usage_cost(self): + """ + This method returns the total account usage cost to date for this year + :return: + """ + current_date = datetime.utcnow().date() + start_date = datetime(current_date.year, 1, 1).date() + cost_explorer_operations = self.__cost_over_usage.get_cost_explorer_operations() + response = cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(current_date+timedelta(days=1)), granularity='MONTHLY') + total_cost = cost_explorer_operations.get_filter_data(ce_data=response['ResultsByTime'], group_by=False) + return total_cost + + @typeguard.typechecked + @logger_time_stamp + def get_user_cost_data(self, group_by_tag_name: str, group_by_tag_value: str, requested_date: datetime = '', forecast: bool = False, duration: int = 0, extra_filter_key_values: dict = None): + """ + This method fetches the cost data from the es_reports + :param extra_filter_key_values: + :param group_by_tag_value: + :param group_by_tag_name: + :param duration: + :param forecast: + :param requested_date: + :return: + """ + extra_filter_matches = [{'Tags': {'Key': group_by_tag_name, 'Values': [group_by_tag_value]}}] + if extra_filter_key_values: + extra_filter_matches.extend([{'Tags': {'Key': filter_key, 'Values': [filter_value]}} for filter_key, filter_value in extra_filter_key_values.items()]) + start_date = requested_date.replace(minute=self.ZERO, hour=self.ZERO, second=self.ZERO, microsecond=self.ZERO) + if forecast:
end_date = start_date + timedelta(days=duration) + response = self.__cost_over_usage.get_forecast_cost_data(start_date=start_date, end_date=end_date, + extra_matches=extra_filter_matches, extra_operation=self.AND, tag_name=group_by_tag_name) + return_key = 'Forecast' + else: + response = self.__cost_over_usage.get_monthly_user_es_cost_data(start_date=start_date, + end_date=datetime.utcnow().replace(microsecond=self.ZERO) + timedelta(days=1), + extra_matches=extra_filter_matches, extra_operation=self.AND, tag_name=group_by_tag_name) + return_key = 'Cost' + if response: + return round(response[self.ZERO].get(return_key), self.DEFAULT_ROUND_DIGITS) + return self.ZERO + + @typeguard.typechecked + @logger_time_stamp + def prepare_instance_data(self, instance_data: list, user: str, ticket_id: str, user_cost: float, + cost_estimation: float, ticket_opened_date: datetime): + """ + This method returns es data to upload + :param instance_data: + :param user: + :param ticket_id: + :param user_cost: + :param cost_estimation: + :param ticket_opened_date: + :return: dict data + """ + return { + 'cloud_name': self.__public_cloud_name.upper(), + 'account_name': self.__account_name, + 'region_name': instance_data[self.ZERO].get('region_name'), + 'user': user, + 'user_cro': instance_data[self.ZERO].get('user_cro'), + 'actual_cost': user_cost, + 'ticket_id': ticket_id, + 'ticket_id_state': 'in-progress', + 'estimated_cost': cost_estimation, + 'total_instances': len(instance_data), + 'monitored_days': (datetime.utcnow().date() - ticket_opened_date.date()).days, + 'ticket_opened_date': ticket_opened_date.date(), + 'duration': int(instance_data[self.ZERO].get('duration')), + 'approved_manager': instance_data[self.ZERO].get('approved_manager'), + 'user_manager': instance_data[self.ZERO].get('manager'), + 'project': instance_data[self.ZERO].get('project'), + 'owner': instance_data[self.ZERO].get('owner'), + 'total_spots': len([instance for instance in instance_data if instance.get('instance_plan').lower() == 'spot']), + 'total_ondemand': len([instance for instance in instance_data if instance.get('instance_plan').lower() == 'ondemand']), + self.ALLOCATED_BUDGET: self.get_account_budget_from_payer_ce_report(), + 'instances': [f"{instance.get('instance_name')}, {instance.get('instance_id')}, " + f"{instance.get('instance_plan')}, " + f"{instance.get('instance_type')}, " + f"{instance.get('instance_state')}, {instance.get('instance_running_days')}" for instance in instance_data], + 'instance_types': [instance.get('instance_type') for instance in instance_data] + } + + @typeguard.typechecked + @logger_time_stamp + def __prepare_update_es_data(self, source: dict, instance_data: list, user_cost: float, cost_estimation: float): + """ + This method update the values of jira id data + :param source: + :param instance_data: + :param user_cost: + :param cost_estimation: + :return: dict data + """ + for instance in instance_data: + index = [idx for idx, es_instance in enumerate(source.get('instances', [])) if instance.get('instance_id') in es_instance] + running_days = instance.get('instance_running_days') + if index: + source['instances'][index[self.ZERO]] = f"{instance.get('instance_name')}, {instance.get('instance_id')}, " \ + f"{instance.get('instance_plan')}, {instance.get('instance_type')}, " \ + f"{instance.get('instance_state')}, {running_days}" + else: + source.setdefault('instances', []).append(f"{instance.get('instance_name')}, {instance.get('instance_id')}, " + f"{instance.get('instance_plan')}, 
{instance.get('instance_type')}, " + f"{instance.get('instance_state')}, {running_days}") + source.setdefault('instance_types', []).append(instance.get('instance_type')) + if instance.get('instance_plan', '').lower() == 'spot': + source['total_spots'] = source.get('total_spots', 0) + 1 + elif instance.get('instance_plan', '').lower() == 'ondemand': + source['total_ondemand'] = source.get('total_ondemand', 0) + 1 + AWSMonitorTickets().verify_es_instances_state(es_data=source) + if datetime.strptime(source.get('timestamp'), "%Y-%m-%dT%H:%M:%S.%f").date() != datetime.utcnow().date(): + source['monitored_days'] = (datetime.utcnow().date() - source.get('ticket_opened_date')).days + source['total_instances'] = len(source.get('instances', [])) + source['duration'] = int(instance_data[self.ZERO].get('duration')) + source['estimated_cost'] = round(cost_estimation, self.DEFAULT_ROUND_DIGITS) + source['actual_cost'] = user_cost + if instance_data[self.ZERO].get('user_cro') and source.get('user_cro') != instance_data[self.ZERO].get('user_cro'): + source['user_cro'] = instance_data[self.ZERO].get('user_cro') + if instance_data[self.ZERO].get('user') and source.get('user') != instance_data[self.ZERO].get('user'): + source['user'] = instance_data[self.ZERO].get('user') + source['timestamp'] = datetime.utcnow() + if source.get('ticket_id_state') != 'in-progress': + source['ticket_id_state'] = 'in-progress' + source['approved_manager'] = instance_data[self.ZERO].get('approved_manager') + source['user_manager'] = instance_data[self.ZERO].get('manager') + source[self.ALLOCATED_BUDGET] = self.get_account_budget_from_payer_ce_report() + return source + + @typeguard.typechecked + @logger_time_stamp + def __upload_cro_report_to_es(self, monitor_data: dict): + """ + This method uploads the data to the elastic search index and returns the data + :param monitor_data: + :return: + """ + upload_data = {} + for ticket_id, instance_data in monitor_data.items(): + ticket_id = ticket_id.split('-')[-1] + user = instance_data[self.ZERO].get('user') + user_project = instance_data[self.ZERO].get('project') + issue_description = self.jira_operations.get_issue_description(ticket_id=ticket_id, state='ANY') + ticket_opened_date = issue_description.get('TicketOpenedDate') + group_by_tag_name = self.COST_EXPLORER_TAGS[self.TICKET_ID_KEY] + user_cost = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, + requested_date=ticket_opened_date, + extra_filter_key_values={'Project': user_project}) + duration = int(instance_data[self.ZERO].get('duration', 0)) + user_forecast = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, requested_date=datetime.utcnow(), extra_filter_key_values={'Project': user_project}, forecast=True, duration=duration) + cost_estimation = float(instance_data[self.ZERO].get('estimated_cost', self.ZERO)) + if self.__cost_over_usage.es_operations.verify_elastic_index_doc_id(index=self.__cost_over_usage.es_index_cro, doc_id=ticket_id): + es_data = self.__cost_over_usage.es_operations.get_es_data_by_id(id=ticket_id, index=self.__cost_over_usage.es_index_cro) + es_data['_source']['ticket_opened_date'] = ticket_opened_date.date() + es_data['_source']['forecast'] = user_forecast + es_data['_source']['user'] = user + source = self.__prepare_update_es_data(source=es_data.get('_source'), instance_data=instance_data, cost_estimation=cost_estimation, user_cost=user_cost) + 
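+ # re-index the merged document under the same ticket id so the CRO index keeps a single document per ticket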
self.__cost_over_usage.es_operations.update_elasticsearch_index(index=self.__es_index_cro, id=ticket_id, metadata=source) + upload_data[ticket_id] = source + else: + if ticket_id not in upload_data: + source = self.prepare_instance_data(instance_data=instance_data, ticket_id=ticket_id, cost_estimation=cost_estimation, user=user, user_cost=user_cost, ticket_opened_date=ticket_opened_date) + source['ticket_opened_date'] = ticket_opened_date.date() + source['forecast'] = user_forecast + source['user'] = user + if not source.get(self.ALLOCATED_BUDGET): + source[self.ALLOCATED_BUDGET] = self.get_account_budget_from_payer_ce_report() + self.__cost_over_usage.es_operations.upload_to_elasticsearch(index=self.__es_index_cro, data=source, id=ticket_id) + upload_data[ticket_id] = source + return upload_data + + @logger_time_stamp + def update_in_progress_ticket_cost(self): + """ + This method updates the in-progress tickets costs + :return: + """ + query = {"query": {"bool": {"must": [ + {"term": {"cloud_name.keyword": self.__public_cloud_name}}, + {"term": {"account_name.keyword": self.__account_name.upper()}}, + {"term": {"ticket_id_state.keyword": "in-progress"}} + ] + }}} + in_progress_es_tickets = self.__cost_over_usage.es_operations.fetch_data_by_es_query(query=query, es_index=self.__es_index_cro) + total_account_cost = self.get_total_account_usage_cost() + for in_progress_ticket in in_progress_es_tickets: + source_data = in_progress_ticket.get('_source') + ticket_id = source_data.get(self.TICKET_ID_KEY) + if source_data.get('account_name').lower() in self.__account_name.lower(): + ticket_opened_date = datetime.strptime(source_data.get('ticket_opened_date'), "%Y-%m-%d") + duration = int(source_data.get('duration', 0)) + group_by_tag_name = self.COST_EXPLORER_TAGS[self.TICKET_ID_KEY] + user_cost = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, requested_date=ticket_opened_date) + user_daily_cost = eval(source_data.get('user_daily_cost', "{}")) + user_name = source_data.get('user') + ce_user_daily_report = self.__get_user_daily_usage_report(days=4, group_by_tag_value=ticket_id, + group_by_tag_name=group_by_tag_name, + user_name=user_name) + user_daily_cost.update(ce_user_daily_report) + user_forecast = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, + group_by_tag_value=ticket_id, requested_date=datetime.utcnow(), + forecast=True, duration=duration) + update_data = {'actual_cost': user_cost, 'forecast': user_forecast, 'timestamp': datetime.utcnow(), + f'TotalCurrentUsage-{datetime.utcnow().year}': total_account_cost, + 'user_daily_cost': str(user_daily_cost)} + if not source_data.get(self.ALLOCATED_BUDGET): + update_data[self.ALLOCATED_BUDGET] = self.get_account_budget_from_payer_ce_report() + self.__cost_over_usage.es_operations.update_elasticsearch_index(index=self.__es_index_cro, metadata=update_data, id=ticket_id) + + def __get_user_daily_usage_report(self, days: int, group_by_tag_name: str, group_by_tag_value: str, user_name: str): + """ + This method returns the users daily report from last X days + :param days: + :return: + """ + user_daily_usage_report = {} + self.__get_user_usage_by_granularity(tag_name=group_by_tag_name, tag_value=group_by_tag_value, + days=days, + result_back_data=user_daily_usage_report) + self.__get_user_usage_by_granularity(tag_name='User', tag_value=user_name, + days=days, result_back_data=user_daily_usage_report) + return user_daily_usage_report + + def __get_user_usage_by_granularity(self, result_back_data: 
dict, tag_name: str, days: int, tag_value): + """ + This method organizes the usage reports and merges them into result_back_data + :param result_back_data: + :param tag_name: + :param days: + :param tag_value: + :return: + """ + end_date = datetime.utcnow().date() + start_date = end_date - timedelta(days=days) + cost_explorer_object = self.__cost_over_usage.get_cost_explorer_operations() + ce_daily_usage = cost_explorer_object.get_cost_by_tags(tag=tag_name, + granularity='DAILY', + start_date=str(start_date), + end_date=str(end_date), + Filter={'Tags': {'Key': tag_name, 'Values': [tag_value]}}) + filtered_ce_daily_usage = cost_explorer_object.get_ce_report_filter_data(ce_daily_usage, + tag_name=tag_name) + for index_id, daily_cost in filtered_ce_daily_usage.items(): + start_date = daily_cost.get('start_date') + usage = round(float(daily_cost.get(tag_name)), self.DEFAULT_ROUND_DIGITS) + result_back_data.setdefault(start_date, {}).update({tag_name: usage}) + + @typeguard.typechecked + @logger_time_stamp + def run(self, monitor_data: dict): + """ + This method runs data collection methods + :param monitor_data: + :return: + """ + handler.setLevel(logging.WARN) + result = self.__upload_cro_report_to_es(monitor_data=monitor_data) + handler.setLevel(logging.INFO) + return result + diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py new file mode 100644 index 00000000..4a00ded2 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py @@ -0,0 +1,281 @@ +import logging +from datetime import datetime, timedelta + +import typeguard + +from cloud_governance.common.clouds.aws.cost_explorer.cost_explorer_operations import CostExplorerOperations +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations +from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations +from cloud_governance.common.ldap.ldap_search import LdapSearch +from cloud_governance.common.logger.init_logger import logger, handler +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.common.mails.mail_message import MailMessage +from cloud_governance.common.mails.postfix import Postfix +from cloud_governance.main.environment_variables import environment_variables + + +class CostOverUsage: + """ + This class monitors the cost explorer reports and sends an alert to users who exceed the specified amount + """ + + DEFAULT_ROUND_DIGITS = 3 + SEND_ALERT_DAY = 3 + FORECAST_GRANULARITY = 'MONTHLY' + FORECAST_COST_METRIC = 'UNBLENDED_COST' + OVER_USAGE_THRESHOLD = 0.05 + CLOUD_GOVERNANCE_ES_MAIL_INDEX = 'cloud-governance-mail-messages' + CRO_OVER_USAGE_ALERT = 'cro-over-usage-alert' + TIMESTAMP_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__aws_account = self.__environment_variables_dict.get('account', '').replace('OPENSHIFT-', '').strip() + self.__postfix_mail = Postfix() + self.__mail_message = MailMessage() + self.__es_host = self.__environment_variables_dict.get('es_host', '') + self.__es_port = self.__environment_variables_dict.get('es_port', '') + self.__over_usage_amount = self.__environment_variables_dict.get('CRO_COST_OVER_USAGE', 0) + self.__es_ce_reports_index = self.__environment_variables_dict.get('USER_COST_INDEX', '') + self.__ldap_search = 
LdapSearch(ldap_host_name=self.__environment_variables_dict.get('LDAP_HOST_NAME', '')) + self.__cro_admins = self.__environment_variables_dict.get('CRO_DEFAULT_ADMINS', []) + self.es_index_cro = self.__environment_variables_dict.get('CRO_ES_INDEX', '') + self.__cro_duration_days = self.__environment_variables_dict.get('CRO_DURATION_DAYS') + self.current_end_date = datetime.utcnow() + self.current_start_date = self.current_end_date - timedelta(days=self.__cro_duration_days) + self.__public_cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') + self.__ce_operations = CostExplorerOperations() + self.es_operations = ElasticSearchOperations(es_host=self.__es_host, es_port=self.__es_port) + self.__over_usage_threshold = self.OVER_USAGE_THRESHOLD * self.__over_usage_amount + self.__ec2_operations = EC2Operations() + + def get_cost_explorer_operations(self): + return self.__ce_operations + + @typeguard.typechecked + @logger_time_stamp + def get_cost_based_on_tag(self, start_date: str, end_date: str, tag_name: str, extra_filters: any = None, extra_operation: str = 'And', granularity: str = None, forecast: bool = False): + """ + This method gives the cost based on the tag_name + :param forecast: + :param granularity: + :param extra_operation: default, And + :param extra_filters: + :param tag_name: + :param start_date: + :param end_date: + :return: + """ + # remove_savings_cost = { # removed the savings plan usage from the user costs + # 'Not': { + # 'Dimensions': { + # 'Key': 'RECORD_TYPE', + # 'Values': ['SavingsPlanRecurringFee', 'SavingsPlanNegation', 'SavingsPlanCoveredUsage'] + # } + # } + # } + Filters = {} #remove_savings_cost + if extra_filters: + if type(extra_filters) == list: + if len(extra_filters) == 1: + Filters = extra_filters[0] + else: + Filters = { + extra_operation: [ + *extra_filters, + # remove_savings_cost + ] + } + else: + Filters = { + # extra_operation: [ + extra_filters + # remove_savings_cost + # ] + } + if forecast: + results_by_time = self.__ce_operations.get_cost_forecast(start_date=start_date, end_date=end_date, granularity=self.FORECAST_GRANULARITY, cost_metric=self.FORECAST_COST_METRIC, Filter=Filters)['Total'] + response = [{'Forecast': round(float(results_by_time.get('Amount')), self.DEFAULT_ROUND_DIGITS)}] + else: + results_by_time = self.__ce_operations.get_cost_by_tags(start_date=start_date, end_date=end_date, tag=tag_name, Filter=Filters, granularity=granularity)['ResultsByTime'] + response = self.__ce_operations.get_filter_data(ce_data=results_by_time, tag_name=tag_name) + return response + + @typeguard.typechecked + @logger_time_stamp + def __get_start_end_dates(self, start_date: datetime = None, end_date: datetime = None): + """ + This method returns the start_date and end_date + :param start_date: + :param end_date: + :return: + """ + if not start_date and not end_date: + end_date = self.current_end_date.date() + start_date = self.current_start_date.date() + elif not start_date: + start_date = self.current_start_date.date() + end_date = end_date.date() + else: + if not end_date: + end_date = self.current_end_date.date() + else: + end_date = end_date.date() + start_date = start_date.date() + return start_date, end_date + + @typeguard.typechecked + @logger_time_stamp + def get_monthly_user_es_cost_data(self, tag_name: str = 'User', start_date: datetime = None, end_date: datetime = None, extra_matches: any = None, granularity: str = 'MONTHLY', extra_operation: str = 'And'): + """ + This method gets the user cost from the es-data + :param 
tag_name: by default User + :param start_date: + :param end_date: + :param extra_matches: + :param granularity: by default MONTHLY + :param extra_operation: + :return: + """ + start_date, end_date = self.__get_start_end_dates(start_date=start_date, end_date=end_date) + return self.get_cost_based_on_tag(start_date=str(start_date), end_date=str(end_date), tag_name=tag_name, granularity=granularity, extra_filters=extra_matches, extra_operation=extra_operation) + + def get_forecast_cost_data(self, tag_name: str = 'User', start_date: datetime = None, end_date: datetime = None, extra_matches: any = None, granularity: str = 'MONTHLY', extra_operation: str = 'And'): + """ + This method returns the forecast based on inputs + :param tag_name: by default User + :param start_date: + :param end_date: + :param extra_matches: + :param granularity: by default MONTHLY + :param extra_operation: + :return: + """ + start_date, end_date = self.__get_start_end_dates(start_date=start_date, end_date=end_date) + return self.get_cost_based_on_tag(start_date=str(start_date), end_date=str(end_date), tag_name=tag_name, granularity=granularity, extra_filters=extra_matches, extra_operation=extra_operation, forecast=True) + + def get_user_active_ticket_costs(self, user_name: str): + """ + This method returns a boolean indicating whether the user should open the ticket or not + :param user_name: + :return: + """ + query = { # check user opened the ticket in elastic_search + "query": { + "bool": { + "must": [{"term": {"user_cro.keyword": user_name}}, + {"terms": {"ticket_id_state.keyword": ['new', 'manager-approved', 'in-progress']}}, + {"term": {"account_name.keyword": self.__aws_account.upper()}} + ], + "filter": { + "range": { + "timestamp": { + "format": "yyyy-MM-dd", + "lte": str(self.current_end_date.date()), + "gte": str(self.current_start_date.date()), + } + } + } + } + } + } + user_active_tickets = self.es_operations.fetch_data_by_es_query(es_index=self.es_index_cro, query=query) + if not user_active_tickets: + return None + else: + total_active_ticket_cost = 0 + for cro_data in user_active_tickets: + opened_ticket_cost = float(cro_data.get('_source').get('estimated_cost')) + total_active_ticket_cost += opened_ticket_cost + return total_active_ticket_cost + + @logger_time_stamp + def get_cost_over_usage_users(self): + """ + This method returns the cost over usage users which are not opened ticket + :return: + """ + over_usage_users = [] + current_month_users = self.get_monthly_user_es_cost_data() + for user in current_month_users: + user_name = str(user.get('User')) + user_cost = round(user.get('Cost'), self.DEFAULT_ROUND_DIGITS) + if user_cost >= (self.__over_usage_amount - self.__over_usage_threshold): + user_active_tickets_cost = self.get_user_active_ticket_costs(user_name=user_name) + if not user_active_tickets_cost: + over_usage_users.append(user) + else: + user_cost_without_active_ticket = user_cost - user_active_tickets_cost + if user_cost_without_active_ticket > self.__over_usage_amount: + user['Cost'] = user_cost_without_active_ticket + over_usage_users.append(user) + return over_usage_users + + @logger_time_stamp + def send_alerts_to_over_usage_users(self): + """ + This method send alerts to cost over usage users + Send alert to users every 3rd day, if not open ticket + :return: + """ + users_list = self.get_cost_over_usage_users() + alerted_users = [] + for row in users_list: + user, cost, project = row.get('User'), row.get('Cost'), row.get('Project', '') + # send_alert, alert_number = 
self.get_last_mail_alert_status(user=str(user)) + # if send_alert: + alerted_users.append(user) + cc = [*self.__cro_admins] + user_details = self.__ldap_search.get_user_details(user_name=user) + if user_details: + if self.__ec2_operations.verify_active_instances(tag_name='User', tag_value=str(user)): + name = f'{user_details.get("FullName")}' + cc.append(user_details.get('managerId')) + subject, body = self.__mail_message.cro_cost_over_usage(CloudName=self.__public_cloud_name, + OverUsageCost=self.__over_usage_amount, + FullName=name, Cost=cost, Project=project, to=user) + es_data = {'Alert': 1, 'MissingUserTicketCost': cost} + handler.setLevel(logging.WARN) + self.__postfix_mail.send_email_postfix(to=user, cc=[], content=body, subject=subject, mime_type='html', es_data=es_data, message_type=self.CRO_OVER_USAGE_ALERT) + handler.setLevel(logging.INFO) + return alerted_users + + def get_last_mail_alert_status(self, user: str): + """ + This method return the last mail alert. + :param user: + :return: + """ + query = { + "query": { + "bool": { + "must": [ + {"term": {"To.keyword": user}}, + {"term": {"MessageType.keyword": self.CRO_OVER_USAGE_ALERT}}, + ] + } + }, + "size": 1, + "sort": {"timestamp": "desc"} + } + response = self.es_operations.fetch_data_by_es_query(query=query, es_index=self.CLOUD_GOVERNANCE_ES_MAIL_INDEX, search_size=1, limit_to_size=True) + if response: + last_alert = response[0] + last_send_date = last_alert.get('_source').get('timestamp') + alert_number = last_alert.get('_source').get('Alert', 0) + current_date = datetime.utcnow().date() + last_send_date = datetime.strptime(last_send_date, self.TIMESTAMP_DATE_FORMAT).date() + days = (current_date - last_send_date).days + if days % self.SEND_ALERT_DAY == 0 and last_send_date != current_date: + return True, alert_number + logger.warning(f"Already sent mail on {last_send_date} to {user}") + return False, alert_number + return True, 0 + + @logger_time_stamp + def run(self): + """ + This method runs the cost over usage methods + :return: + """ + return self.send_alerts_to_over_usage_users() diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/monitor_cro_instances.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/monitor_cro_instances.py new file mode 100644 index 00000000..d4fea90b --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/monitor_cro_instances.py @@ -0,0 +1,63 @@ +from datetime import datetime + +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + + +class MonitorCROInstances: + """ + This class monitor cro instances + """ + + def __init__(self, region_name: str = ''): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') + self.__ec2_operations = EC2Operations(region=self.__region_name) + self.__cro_resource_tag_name = self.__environment_variables_dict.get('CRO_RESOURCE_TAG_NAME') + + @logger_time_stamp + def __monitor_instances(self): + """ + This method monitor instances and returns the data + :return: + """ + monitored_ticket_ids = {} + filters = {'Filters': [{'Name': 'tag-key', 'Values': ['Duration']}]} + instances = self.__ec2_operations.get_ec2_instance_list(**filters) + for resource in instances: + tags = 
resource.get('Tags') + create_time = self.__ec2_operations.get_attached_time(volume_list=resource.get('BlockDeviceMappings')) + if not create_time: + create_time = resource.get('LaunchTime') + ticket_id = self.__ec2_operations.get_tag_value_from_tags(tag_name=self.__cro_resource_tag_name, tags=tags) + running_days = (datetime.now().date() - create_time.date()).days + monitored_ticket_ids.setdefault(ticket_id, []).append({ + 'region_name': self.__region_name, + 'ticket_id': ticket_id, + 'instance_id': resource.get('InstanceId'), + 'instance_create_time': create_time, + 'instance_state': resource.get('State')['Name'], + 'instance_type': resource.get('InstanceType'), + 'instance_running_days': running_days, + 'instance_plan': resource.get('InstanceLifecycle', 'ondemand'), + 'user_cro': self.__ec2_operations.get_tag_value_from_tags(tag_name='UserCRO', tags=tags), + 'user': self.__ec2_operations.get_tag_value_from_tags(tag_name='User', tags=tags), + 'manager': self.__ec2_operations.get_tag_value_from_tags(tag_name='Manager', tags=tags), + 'approved_manager': self.__ec2_operations.get_tag_value_from_tags(tag_name='ApprovedManager', tags=tags), + 'owner': self.__ec2_operations.get_tag_value_from_tags(tag_name='Owner', tags=tags), + 'project': self.__ec2_operations.get_tag_value_from_tags(tag_name='Project', tags=tags), + 'instance_name': self.__ec2_operations.get_tag_value_from_tags(tag_name='Name', tags=tags), + 'email': self.__ec2_operations.get_tag_value_from_tags(tag_name='Email', tags=tags), + 'duration': self.__ec2_operations.get_tag_value_from_tags(tag_name='Duration', tags=tags, cast_type='int'), + 'estimated_cost': self.__ec2_operations.get_tag_value_from_tags(tag_name='EstimatedCost', tags=tags, cast_type='float') + }) + return monitored_ticket_ids + + @logger_time_stamp + def run(self): + """ + This method run the monitoring methods + :return: + """ + return self.__monitor_instances() diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py new file mode 100644 index 00000000..6157e7f3 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py @@ -0,0 +1,108 @@ +from datetime import datetime + +import boto3 + +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.collect_cro_reports import CollectCROReports +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.cost_over_usage import CostOverUsage +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.monitor_cro_instances import MonitorCROInstances +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.aws_monitor_tickets import AWSMonitorTickets +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.tag_cro_instances import TagCROInstances +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + + +class RunCRO: + + PERSISTENT_RUN_DOC_ID = f'cro_run_persistence-{datetime.utcnow().date()}' + PERSISTENT_RUN_INDEX = 'cloud_resource_orchestration_persistence_run' + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.cro_cost_over_usage = CostOverUsage() + self.cro_reports = CollectCROReports() + self.aws_monitor_tickets = AWSMonitorTickets() + self.account = self.__environment_variables_dict.get('account', 
'').replace('OPENSHIFT-', '').lower().strip() + self.__run_active_regions = self.__environment_variables_dict.get('RUN_ACTIVE_REGIONS') + self.__region = self.__environment_variables_dict.get('AWS_DEFAULT_REGION', '') + + @logger_time_stamp + def send_cro_alerts(self): + """ + This method send the cost_over_usage alert and Ticket status alerts + :return: + """ + es_data = self.cro_cost_over_usage.es_operations.get_es_data_by_id(index=self.PERSISTENT_RUN_INDEX, id=self.PERSISTENT_RUN_DOC_ID) + first_run = True + if es_data: + source = es_data.get('_source') + last_run_time = source.get(f'last_run_{self.account.lower()}') + if last_run_time: + last_updated_time = datetime.strptime(last_run_time, "%Y-%m-%dT%H:%M:%S.%f").date() + if last_updated_time == datetime.utcnow().date(): + first_run = False + self.__environment_variables_dict.update({'CRO_FIRST_RUN': first_run}) + if first_run: + cost_over_usage_users = self.cro_cost_over_usage.run() + logger.info(f'Cost Over Usage Users list: {", ".join(cost_over_usage_users)}') + self.aws_monitor_tickets.run() + self.cro_reports.update_in_progress_ticket_cost() + self.save_current_timestamp() + + @logger_time_stamp + def save_current_timestamp(self): + """ + This method saves the current timestamp + Storing timestamp for not sending multiple alerts in a day, if we run any number of times + :return: + """ + if not self.cro_cost_over_usage.es_operations.verify_elastic_index_doc_id(index=self.PERSISTENT_RUN_INDEX, doc_id=self.PERSISTENT_RUN_DOC_ID): + self.cro_cost_over_usage.es_operations.upload_to_elasticsearch(index=self.PERSISTENT_RUN_INDEX, data={f'last_run_{self.account}': datetime.utcnow()}, id=self.PERSISTENT_RUN_DOC_ID) + else: + self.cro_cost_over_usage.es_operations.update_elasticsearch_index(index=self.PERSISTENT_RUN_INDEX, metadata={f'last_run_{self.account}': datetime.utcnow()}, id=self.PERSISTENT_RUN_DOC_ID) + + @logger_time_stamp + def run_cloud_resources(self): + """ + This method run the aws resources in specified region or all regions + :return: + """ + if self.__run_active_regions: + active_regions = [region.get('RegionName') for region in boto3.client('ec2').describe_regions()['Regions']] + logger.info(f"""*****Running CloudResourceOrchestration in all Active regions: {active_regions}*****""") + else: + active_regions = [self.__region] + for active_region in active_regions: + cro_monitor = MonitorCROInstances(region_name=active_region) + cro_tagging = TagCROInstances(region_name=active_region) + self.__environment_variables_dict.update({'AWS_DEFAULT_REGION': active_region}) + logger.info(f"""Running CloudResourceOrchestration in region: {active_region}""") + logger.info(f"""{active_region}: -> Running CRO Tagging""") + tagging_response = cro_tagging.run() + logger.info(f'Tagged instances : {tagging_response}') + logger.info(f"""{active_region}: -> Running CRO Resource data Collection""") + monitor_response = cro_monitor.run() + if monitor_response: + cro_reports = self.cro_reports.run(monitor_response) + logger.info(f'Cloud Orchestration Resources: {cro_reports}') + + @logger_time_stamp + def start_cro(self): + """ + This method start the cro process methods + 1. Send alert to cost over usage users + 2. Tag the new instances + 3. monitor and upload the new instances' data + 4. 
Monitor the Jira ticket progressing + :return: + """ + self.send_cro_alerts() + self.run_cloud_resources() + + @logger_time_stamp + def run(self): + """ + This method start the aws CRO operations + :return: + """ + self.start_cro() diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/tag_cro_instances.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/tag_cro_instances.py new file mode 100644 index 00000000..e923e128 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/tag_cro_instances.py @@ -0,0 +1,147 @@ +import boto3 +import typeguard + +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations +from cloud_governance.common.jira.jira_operations import JiraOperations +from cloud_governance.common.ldap.ldap_search import LdapSearch +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables +from cloud_governance.policy.policy_operations.aws.tag_cluster.tag_cluster_operations import TagClusterOperations + + +class TagCROInstances: + """ + This class manages the tagging instances which have the tag TicketId + """ + KEY = 'Key' + VALUE = 'Value' + NA_USER = 'NA' + EMPTY_USER = '' + + def __init__(self, region_name: str = ''): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__region_name = region_name if region_name else self.__environment_variables_dict.get('AWS_DEFAULT_REGION') + self.__cro_resource_tag_name = self.__environment_variables_dict.get('CRO_RESOURCE_TAG_NAME') + self.__ec2_client = boto3.client('ec2', region_name=self.__region_name) + self.__ec2_operations = EC2Operations(region=self.__region_name) + self.jira_operations = JiraOperations() + self.__ldap_search = LdapSearch(ldap_host_name=self.__environment_variables_dict.get('LDAP_HOST_NAME', '')) + self.__replace_user_names = self.__environment_variables_dict.get('CRO_REPLACED_USERNAMES') + self.__tag_cluster_operations = TagClusterOperations(region=self.__region_name) + + @typeguard.typechecked + @logger_time_stamp + def __get_instance_volumes(self, block_device_mappings: list): + """ + This method returns the instance volumes + :param block_device_mappings: + :return: + """ + volumes_list = [] + for mapping in block_device_mappings: + if mapping.get('Ebs').get('VolumeId'): + volumes_list.append(mapping.get('Ebs').get('VolumeId')) + return volumes_list + + @typeguard.typechecked + @logger_time_stamp + def __get_ldap_user_data(self, user: str, tag_name: str): + """ + This method returns the ldap user tag_name + :param user: + :param tag_name: + :return: + """ + user_details = self.__ldap_search.get_user_details(user) + if user_details: + return user_details.get(tag_name) + return self.NA_USER + + @logger_time_stamp + def __tag_ticket_id_attach_instance(self, ticket_id: str, instance_id: str, volume_ids: list, user: str): + """ + This method tag the instances which have the tag TicketId + :param ticket_id: + :param instance_id: + :param volume_ids: + :return: + """ + ticket_id = ticket_id.split('-')[-1] + ticket_description = self.jira_operations.get_issue_description(ticket_id=ticket_id, state='INPROGRESS') + if ticket_description: + duration = int(ticket_description.get('Days', 0)) + extended_duration = int(self.jira_operations.get_issue_sub_tasks_duration(ticket_id=ticket_id)) + duration += extended_duration + estimated_cost = 
float(ticket_description.get('CostEstimation')) + budget_extend_ticket_ids = self.jira_operations.get_budget_extend_tickets(ticket_id=ticket_id, ticket_state='closed') + extended_budget = self.jira_operations.get_total_extend_budget(sub_ticket_ids=budget_extend_ticket_ids) + estimated_cost = int(estimated_cost) + int(extended_budget) + manager_approved = ticket_description.get('ApprovedManager') + if not manager_approved: + manager_approved = ticket_description.get('ManagerApprovalAddress') + user_email = ticket_description.get('EmailAddress') + user = user_email.split('@')[0] + project = ticket_description.get('Project') + tags = [{self.KEY: 'Duration', self.VALUE: str(duration)}, + {self.KEY: 'EstimatedCost', self.VALUE: str(estimated_cost)}, + {self.KEY: 'ApprovedManager', self.VALUE: manager_approved}, + {self.KEY: 'Project', self.VALUE: project.upper()}, + {self.KEY: 'Email', self.VALUE: user_email}, + {self.KEY: self.__cro_resource_tag_name, self.VALUE: ticket_id}, + {self.KEY: 'UserCRO', self.VALUE: user}, + {self.KEY: 'Manager', self.VALUE: self.__get_ldap_user_data(user, "ManagerName").upper()}, + {self.KEY: 'Owner', self.VALUE: self.__get_ldap_user_data(user, "FullName").upper()}] + if user: + tags.append({self.KEY: 'User', self.VALUE: user}) + self.__ec2_operations.tag_ec2_resources(client_method=self.__ec2_client.create_tags, resource_ids=[instance_id], tags=tags) + if ticket_description.get('JiraStatus') != self.jira_operations.IN_PROGRESS: + self.jira_operations.move_issue_state(ticket_id=ticket_id, state='inprogress') + logger.info(f'Extra tags are added to the instances: {instance_id}, had an ticket_id: {ticket_id}') + if volume_ids: + try: + self.__ec2_operations.tag_ec2_resources(client_method=self.__ec2_client.create_tags, resource_ids=volume_ids, tags=tags) + logger.info(f'Tagged the instance: {instance_id} attached volumes {volume_ids}') + except Exception as err: + logger.error(err) + return True + return False + + @logger_time_stamp + def __tag_instances(self): + """ + This method list the instances and tag the instances which have the tag TicketId + :return: + """ + ticket_id_instances = {} + instances = self.__ec2_operations.get_ec2_instance_list() + for resource in instances: + instance_id = resource.get('InstanceId') + user = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='User') + if not user: + user = self.__tag_cluster_operations.get_username(start_time=resource.get('LaunchTime'), resource_id=instance_id, resource_type='AWS::EC2::Instance', tags=resource.get('Tags')) + ticket_id = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name=self.__cro_resource_tag_name) if resource.get('Tags') else None + if ticket_id: + duration = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='Duration') + if not duration: + volume_ids = self.__get_instance_volumes(resource.get('BlockDeviceMappings')) + if self.__tag_ticket_id_attach_instance(ticket_id=ticket_id, instance_id=instance_id, volume_ids=volume_ids, user=user): + ticket_id_instances.setdefault(ticket_id, []).append(instance_id) + user = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='User') + tag_user = False + if user in [*self.__replace_user_names, self.NA_USER]: + tag_user = True + if tag_user: + user_cro = self.__ec2_operations.get_tag_value_from_tags(tags=resource.get('Tags'), tag_name='UserCRO') + if user_cro: + volume_ids = 
self.__get_instance_volumes(resource.get('BlockDeviceMappings')) + self.__ec2_operations.tag_ec2_resources(client_method=self.__ec2_client.create_tags, resource_ids=[instance_id, *volume_ids], tags=[{self.KEY: 'User', self.VALUE: user_cro}]) + return ticket_id_instances + + @logger_time_stamp + def run(self): + """ + This method run the tag instance methods + :return: + """ + return self.__tag_instances() diff --git a/tests/unittest/cloud_resource_orchestration/__init__.py b/cloud_governance/cloud_resource_orchestration/clouds/common/__init__.py similarity index 100% rename from tests/unittest/cloud_resource_orchestration/__init__.py rename to cloud_governance/cloud_resource_orchestration/clouds/common/__init__.py diff --git a/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_tagging_operations.py b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_tagging_operations.py new file mode 100644 index 00000000..7508ce0d --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_tagging_operations.py @@ -0,0 +1,18 @@ +from abc import ABC, abstractmethod + + +class AbstractTaggingOperations(ABC): + """ + This class is abstract tagging operations to all the clouds + """ + + def __init__(self): + super().__init__() + + @abstractmethod + def get_resources_list(self, tag_name: str, tag_value: str = ''): + raise NotImplementedError() + + @abstractmethod + def tag_resources_list(self, resources_list: list, update_tags_dict: dict): + raise NotImplementedError() diff --git a/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py b/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py new file mode 100644 index 00000000..51c976fe --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py @@ -0,0 +1,233 @@ +from abc import abstractmethod, ABC +from datetime import datetime + +import typeguard + +from cloud_governance.cloud_resource_orchestration.utils.common_operations import string_equal_ignore_case +from cloud_governance.cloud_resource_orchestration.utils.elastic_search_queries import ElasticSearchQueries +from cloud_governance.cloud_resource_orchestration.utils.constant_variables import FIRST_CRO_ALERT, SECOND_CRO_ALERT, \ + CLOSE_JIRA_TICKET, JIRA_ISSUE_NEW_STATE, DATE_FORMAT +from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations +from cloud_governance.common.jira.jira_operations import JiraOperations +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.common.mails.mail_message import MailMessage +from cloud_governance.common.mails.postfix import Postfix +from cloud_governance.main.environment_variables import environment_variables + + +class AbstractMonitorTickets(ABC): + """ + This Abstract class perform the operations for monitoring tickets + """ + + def __init__(self): + super().__init__() + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__es_operations = ElasticSearchOperations() + self.__es_index_cro = self.__environment_variables_dict.get('CRO_ES_INDEX', '') + self.__account_name = self.__environment_variables_dict.get('account') + self.__cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') + self.__ticket_over_usage_limit = self.__environment_variables_dict.get('TICKET_OVER_USAGE_LIMIT') + self.__default_admins = 
self.__environment_variables_dict.get('CRO_DEFAULT_ADMINS', []) + self.__elasticsearch_queries = ElasticSearchQueries() + self.__jira_operations = JiraOperations() + self.__mail_message = MailMessage() + self.__postfix = Postfix() + + def __get_all_in_progress_tickets(self, account_name: str = '', cloud_name: str = '', fields: list = None): + """ + This method returns all in-progress tickets + :param account_name: + :param cloud_name: + :param fields: + :return: + """ + account_name = account_name if account_name else self.__account_name + cloud_name = cloud_name if cloud_name else self.__cloud_name + match_conditions = [ + {"term": {"account_name.keyword": account_name}}, + {"term": {"cloud_name.keyword": cloud_name}} + ] + in_progress_tickets_query = self.__elasticsearch_queries.get_all_in_progress_tickets( + match_conditions=match_conditions, fields=fields) + in_progress_tickets_list = self.__es_operations.fetch_data_by_es_query(query=in_progress_tickets_query, + es_index=self.__es_index_cro, filter_path='hits.hits._source') + return in_progress_tickets_list + + @abstractmethod + def update_budget_tag_to_resources(self, region_name: str, ticket_id: str, updated_budget: int): + """ + This method updates the budget to cloud resources + :param region_name: + :param ticket_id: + :param updated_budget: + :return: + """ + raise NotImplemented("This method is not implemented") + + @abstractmethod + def update_duration_tag_to_resources(self, region_name: str, ticket_id: str, updated_duration: int): + """ + This method updates the budget to cloud resources + :param region_name: + :param ticket_id: + :param updated_duration: + :return: + """ + raise NotImplemented("This method is not implemented") + + @logger_time_stamp + def extend_tickets_budget(self, ticket_id: str, region_name: str): + """ + This method extends the ticket budget if any + :param ticket_id: + :param region_name: + :return: + """ + ticket_extended = False + sub_ticket_ids = self.__jira_operations.get_budget_extend_tickets(ticket_id=ticket_id, ticket_state='inprogress') + if sub_ticket_ids: + total_budget_to_extend = self.__jira_operations.get_total_extend_budget(sub_ticket_ids=sub_ticket_ids) + if string_equal_ignore_case(self.__cloud_name, 'AWS'): + self.update_budget_tag_to_resources(region_name=region_name, ticket_id=ticket_id, + updated_budget=total_budget_to_extend) + for sub_ticket_id in sub_ticket_ids: + self.__jira_operations.move_issue_state(ticket_id=sub_ticket_id, state='closed') + logger.info(f'Updated the budget of the ticket: {ticket_id}') + ticket_extended = True + else: + logger.info(f'No extended tickets for the TicketId: {ticket_id}') + return ticket_extended + + @typeguard.typechecked + @logger_time_stamp + def extend_ticket_duration(self, ticket_id: str, region_name: str): + """ + This method extends the duration of the ticket if any + :param ticket_id: + :param region_name: + :return: + """ + tickets_found = False + sub_ticket_ids = self.__jira_operations.get_duration_extend_tickets(ticket_id=ticket_id, ticket_state='new') + if sub_ticket_ids: + total_duration_to_extend = self.__jira_operations.get_total_extend_duration(sub_ticket_ids=sub_ticket_ids) + if string_equal_ignore_case(self.__cloud_name, 'AWS'): + self.update_duration_tag_to_resources(region_name=region_name, ticket_id=ticket_id, + updated_duration=total_duration_to_extend) + for sub_ticket_id in sub_ticket_ids: + self.__jira_operations.move_issue_state(ticket_id=sub_ticket_id, state='closed') + logger.info(f'Updated the Duration of the ticket: 
{ticket_id}') + tickets_found = True + else: + logger.info(f'No extended tickets for the TicketId: {ticket_id}') + return tickets_found + + @typeguard.typechecked + @logger_time_stamp + def __close_and_update_ticket_data_in_es(self, ticket_id: str): + """ + This method close the ticket and update in ElasticSearch + :return: + """ + data = {'timestamp': datetime.utcnow(), 'ticket_id_state': 'closed'} + if self.__es_operations.check_elastic_search_connection(): + self.__es_operations.update_elasticsearch_index(index=self.__es_index_cro, id=ticket_id, metadata=data) + self.__jira_operations.move_issue_state(ticket_id, state='CLOSED') + + @typeguard.typechecked + @logger_time_stamp + def _monitor_ticket_duration(self, ticket_id: str, region_name: str, duration: int, completed_duration: int, **kwargs): + """ + This method monitors the ticket duration + :param ticket_id: + :param region_name: + :param duration: + :return: + """ + user, cc = kwargs.get('user_cro'), self.__default_admins + cc.append(kwargs.get('approved_manager')) + remaining_duration = duration - completed_duration + ticket_extended = False + subject = body = None + if remaining_duration <= FIRST_CRO_ALERT: + ticket_extended = self.extend_ticket_duration(ticket_id=ticket_id, region_name=region_name) + if not ticket_extended and remaining_duration == FIRST_CRO_ALERT: + subject, body = self.__mail_message.cro_monitor_alert_message(user=user, days=FIRST_CRO_ALERT, ticket_id=ticket_id) + elif remaining_duration == SECOND_CRO_ALERT: + subject, body = self.__mail_message.cro_monitor_alert_message(user=user, days=SECOND_CRO_ALERT, ticket_id=ticket_id) + else: + if not ticket_extended: + if remaining_duration <= CLOSE_JIRA_TICKET: + self.__close_and_update_ticket_data_in_es(ticket_id=ticket_id) + subject, body = self.__mail_message.cro_send_closed_alert(user, ticket_id) + if subject and body: + self.__postfix.send_email_postfix(to=user, cc=cc, subject=subject, content=body, mime_type='html') + + @typeguard.typechecked + @logger_time_stamp + def _monitor_ticket_budget(self, ticket_id: str, region_name: str, budget: int, used_budget: int, **kwargs): + """ + This method monitors the ticket budget + :param ticket_id: + :param region_name: + :param budget: + :param used_budget + :return: + """ + user, cc = kwargs.get('user_cro'), self.__default_admins + remaining_budget = budget - used_budget + threshold_budget = budget - (budget * (self.__ticket_over_usage_limit / 100)) + subject = body = None + if threshold_budget >= remaining_budget >= 0: + ticket_extended = self.extend_tickets_budget(ticket_id=ticket_id, region_name=region_name) + if not ticket_extended: + subject, body = self.__mail_message.cro_monitor_budget_remain_alert(user=user, budget=budget, + ticket_id=ticket_id, + used_budget=used_budget, + remain_budget=remaining_budget) + elif remaining_budget <= 0: + subject, body = self.__mail_message.cro_monitor_budget_remain_alert(user=user, budget=budget, + ticket_id=ticket_id, + used_budget=used_budget, + remain_budget=remaining_budget) + if subject and body: + self.__postfix.send_email_postfix(to=user, cc=cc, subject=subject, content=body, mime_type='html') + + @logger_time_stamp + def _monitor_in_progress_tickets(self): + """ + This method monitors in-progress tickets + :return: + """ + in_progress_tickets_list = self.__get_all_in_progress_tickets() + for ticket_data in in_progress_tickets_list: + source_data = ticket_data.get('_source') + if source_data: + ticket_id = source_data.get('ticket_id') + region_name = 
source_data.get('region_name', '') + budget = int(source_data.get('estimated_cost', 0)) + duration = int(source_data.get('duration', 0)) + used_budget = int(source_data.get('actual_cost', 0)) + ticket_start_date = datetime.strptime(source_data.get('ticket_opened_date'), DATE_FORMAT).date() + completed_duration = (datetime.utcnow().date() - ticket_start_date).days + if ticket_id == '200': + completed_duration = 25 + used_budget = 9 + self._monitor_ticket_budget(ticket_id=ticket_id, region_name=region_name, budget=budget, + used_budget=used_budget, + user_cro=source_data.get('user_cro'), + approved_manager=source_data.get('approved_manager')) + self._monitor_ticket_duration(ticket_id=ticket_id, region_name=region_name, duration=duration, + completed_duration=completed_duration, + user_cro=source_data.get('user_cro'), + approved_manager=source_data.get('approved_manager') + ) + + def monitor_tickets(self): + """ + This method monitor all tickets by status + :return: + """ + self._monitor_in_progress_tickets() diff --git a/cloud_governance/cloud_resource_orchestration/common/ec2_monitor_operations.py b/cloud_governance/cloud_resource_orchestration/common/ec2_monitor_operations.py deleted file mode 100644 index 6305c5ca..00000000 --- a/cloud_governance/cloud_resource_orchestration/common/ec2_monitor_operations.py +++ /dev/null @@ -1,231 +0,0 @@ -import calendar -from datetime import datetime, date -import json -import os.path - -import pytz - -from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations -from cloud_governance.common.clouds.aws.price.price import AWSPrice -from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload -from cloud_governance.main.environment_variables import environment_variables - - -class EC2MonitorOperations: - """This class contains the ec2 monitor operations""" - - CURRENT_DAY = datetime.now() - HOURS_IN_SECONDS = 3600 - DEFAULT_ROUND_VALUE = 3 - HOURS_IN_DAY = 24 - DEFAULT_OS = 'Linux' - - def __init__(self, region_name: str): - self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__region_name = region_name - self.__trails_snapshot_time = self.__environment_variables_dict.get('TRAILS_SNAPSHOT_TIME') - self.__trail_logs = self.get_trail_logs() - self.__es_upload = ElasticUpload() - self.__es_instance_data = {} - self.__aws_price = AWSPrice(region_name=self.__region_name) - self.__ec2_operations = EC2Operations(region=self.__region_name) - - def get_trail_logs(self): - """This method get trail logs from the file""" - if self.__trails_snapshot_time: - file_path = f'/tmp/{self.__trails_snapshot_time.date()}.json' - if os.path.exists(file_path): - with open(file_path) as file: - trails = json.load(file) - return trails if trails else [] - else: - raise FileNotFoundError - return [] - - def get_instance_data_in_es(self, jira_id: str, instance_id: str): - """This method get instance data in es""" - try: - es_index = self.__es_upload.es_index - if self.__es_upload.elastic_search_operations.verify_elastic_index_doc_id(index=es_index, doc_id=jira_id): - es_data = self.__es_upload.elastic_search_operations.get_es_data_by_id(id=jira_id, index=es_index) - self.__es_instance_data = es_data.get('_source').get(instance_id) - except: - pass - - def get_value_from_es_data(self, name: str): - """ - This method get the data from the - """ - if self.__es_instance_data: - source = self.__es_instance_data - return source.get(name) - return '' - - def get_instance_logs(self, instance_id: str, 
last_saved_time: datetime): - """This method returns the instance logs""" - instance_logs = [] - for trail_log in self.__trail_logs: - for resource in trail_log.get('Resources'): - if resource.get('ResourceName') == instance_id: - event_time = datetime.strptime(trail_log.get('EventTime'), "%Y-%m-%dT%H:%M:%S%z") - event_name = trail_log.get('EventName') - if event_name == 'TerminateInstances': - event_name = 'StopInstances' - if last_saved_time: - if last_saved_time < event_time: - instance_logs.append({'Key': event_name, 'Value': event_time}) - else: - instance_logs.append({'Key': event_name, 'Value': event_time}) - return sorted(instance_logs, key=lambda a: a['Value']) if instance_logs else [] - - def get_hours_in_two_date_times(self, time1: datetime, time2: datetime): - """This method get the difference between two time hours""" - diff_time = time2 - time1 - hours = diff_time.total_seconds()/self.HOURS_IN_SECONDS - return round(hours, self.DEFAULT_ROUND_VALUE) - - def process_tails(self, trails: list): - """ - This method processes the instance start/stop logs and removes consecutive start/stop logs - """ - if len(trails) == 0 or len(trails) == 1: - return trails - processed_trails = [trails[0]] - prev_instance_state = trails[0]["Key"] - for idx in range(1, len(trails)): - curr_instance_state = trails[idx]["Key"] - if prev_instance_state != curr_instance_state: - processed_trails.append(trails[idx]) - prev_instance_state = curr_instance_state - return processed_trails - - def get_run_hours_from_trails(self, trails: list, last_instance_state: str, create_datetime: datetime, launch_time: datetime, trails_snapshot_time: datetime = environment_variables.environment_variables_dict.get('TRAILS_SNAPSHOT_TIME'), last_saved_time: datetime = None, present_state: str = ''): - """ - This method returns the trail hours - """ - trails = self.process_tails(trails) - tzinfo = launch_time.tzinfo - run_hours = 0 - if not trails: - if last_instance_state not in ('stopped', 'terminated'): - if last_saved_time: - run_hours += self.get_hours_in_two_date_times(time1=last_saved_time, time2=trails_snapshot_time) - else: - run_hours += self.get_hours_in_two_date_times(time1=launch_time, time2=trails_snapshot_time) - return run_hours - start = 0 - end = len(trails) - 1 - if present_state == 'terminated' and len(trails) == 1: - return 0 - if trails[0].get('Key') == 'StopInstances' and trails[len(trails) - 1].get('Key') == 'StopInstances': - start += 1 - stop_event_time = trails[0].get('Value').astimezone(tzinfo) - if last_saved_time: - run_hours += self.get_hours_in_two_date_times(time1=last_saved_time, time2=stop_event_time) - else: - if create_datetime < launch_time < stop_event_time: - run_hours += self.get_hours_in_two_date_times(time1=launch_time, time2=stop_event_time) - else: - run_hours += self.get_hours_in_two_date_times(time1=create_datetime, time2=stop_event_time) - elif trails[0].get('Key') == 'StartInstances' and trails[len(trails) - 1].get('Key') == 'StartInstances': - end -= 1 - start_event_time = trails[0].get('Value').astimezone(tzinfo) - run_hours += self.get_hours_in_two_date_times(time1=start_event_time, time2=trails_snapshot_time) - elif trails[0].get('Key') == 'StopInstances' and trails[len(trails) - 1].get('Key') == 'StartInstances': - start += 1 - stop_event_time = trails[0].get('Value').astimezone(tzinfo) - if last_saved_time: - run_hours += self.get_hours_in_two_date_times(time1=last_saved_time, time2=stop_event_time) - else: - if create_datetime < launch_time < stop_event_time: - 
run_hours += self.get_hours_in_two_date_times(time1=launch_time, time2=stop_event_time) - else: - run_hours += self.get_hours_in_two_date_times(time1=create_datetime, time2=stop_event_time) - end -= 1 - start_event_time = trails[0].get('Value').astimezone(tzinfo) - run_hours += self.get_hours_in_two_date_times(time1=start_event_time, time2=trails_snapshot_time) - while start < end and (end - start + 1) > 0: - start_event_time = trails[start].get('Value').astimezone(tzinfo) - stop_event_time = trails[start + 1].get('Value').astimezone(tzinfo) - run_hours += self.get_hours_in_two_date_times(time1=start_event_time, time2=stop_event_time) - start += 2 - return round(run_hours, self.DEFAULT_ROUND_VALUE) - - def get_last_saved_time(self, tzinfo): - """This method return the last saved time from the es_data""" - if self.__es_instance_data: - saved_time = self.get_value_from_es_data(name='last_saved_time') - if saved_time: - return datetime.strptime(saved_time, "%Y-%m-%dT%H:%M:%S%z") - return None - - def get_attached_time(self, volume_list: list): - """ - This method return the root volume attached time - """ - for mapping in volume_list: - if mapping.get('Ebs').get('DeleteOnTermination'): - return mapping.get('Ebs').get('AttachTime') - return '' - - def get_instance_run_hours(self, instance: dict, jira_id: str): - """This method get the instance run hours""" - instance_id, instance_state, launch_time = instance.get('InstanceId'), instance.get('State')['Name'], instance.get('LaunchTime') - launch_time = launch_time.astimezone(pytz.utc) - tzinfo = launch_time.tzinfo - self.get_instance_data_in_es(jira_id, instance_id=instance_id) - last_saved_time = self.get_last_saved_time(tzinfo) - create_datetime = self.get_attached_time(instance.get('BlockDeviceMappings')) - last_instance_state = self.get_value_from_es_data(name='instance_state') if self.get_value_from_es_data(name='instance_state') else instance_state - trails_snapshot_time = self.__trails_snapshot_time if self.__trails_snapshot_time else datetime.now().replace(microsecond=0).astimezone(tzinfo) - instance_trails = self.get_instance_logs(instance_id, last_saved_time=last_saved_time) - run_hours = self.get_run_hours_from_trails(trails=instance_trails, - launch_time=launch_time, - trails_snapshot_time=trails_snapshot_time, - last_saved_time=last_saved_time, last_instance_state=last_instance_state, - create_datetime=create_datetime) - return run_hours, trails_snapshot_time - - def get_instance_hours_price(self, instance_type: str, run_hours: float): - """ - This method returns the instance pricing - """ - region_code = self.__aws_price.get_region_name(self.__region_name) - price = float(self.__aws_price.get_price(instance=instance_type, region=region_code, os=self.DEFAULT_OS)) - return round(price * run_hours, self.DEFAULT_ROUND_VALUE) - - def calculate_days(self, launch_date: datetime): - """This method return the no. 
of days""" - today = date.today() - diff_date = today - launch_date.date() - return diff_date.days - - def get_volumes_cost(self, block_device_mappings: list): - """This method return the volumes cost from instance attached volumes""" - volumes_list = [] - for mapping in block_device_mappings: - if mapping.get('Ebs').get('VolumeId'): - volumes_list.append(mapping.get('Ebs').get('VolumeId')) - volumes = self.__ec2_operations.get_volumes(VolumeIds=volumes_list) - ebs_price = 0 - for volume in volumes: - create_time = volume.get('CreateTime') - if self.__trails_snapshot_time: - create_time = self.__trails_snapshot_time.astimezone(create_time.tzinfo) - current_datetime = datetime.utcnow().replace(microsecond=0, tzinfo=create_time.tzinfo) - hours = self.get_hours_in_two_date_times(time1=create_time, time2=current_datetime) - months = round(hours / calendar.monthrange(current_datetime.year, current_datetime.month)[1], self.DEFAULT_ROUND_VALUE) - ebs_price += round(self.__aws_price.get_ec2_price(resource='ebs', item_data=volume) * months, self.DEFAULT_ROUND_VALUE) - return ebs_price - - def get_instances_by_filtering(self, tag_key_name: str): - """This method get the instances with the tag-key filter""" - filters = { - 'Filters': [ - { - 'Name': 'tag-key', - 'Values': [tag_key_name] - } - ] - } - return self.__ec2_operations.get_instances(**filters) diff --git a/cloud_governance/cloud_resource_orchestration/monitor/cloud_monitor.py b/cloud_governance/cloud_resource_orchestration/monitor/cloud_monitor.py index c31ae14f..3694b9e7 100644 --- a/cloud_governance/cloud_resource_orchestration/monitor/cloud_monitor.py +++ b/cloud_governance/cloud_resource_orchestration/monitor/cloud_monitor.py @@ -1,85 +1,42 @@ -import datetime -import json -from json import JSONEncoder -from datetime import datetime, timedelta -import os.path - -import pytz - -from cloud_governance.cloud_resource_orchestration.aws.long_run.ec2_long_run import EC2LongRun -from cloud_governance.common.clouds.aws.cloudtrail.cloudtrail_operations import CloudTrailOperations +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.run_cro import RunCRO from cloud_governance.common.jira.jira import logger from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp from cloud_governance.main.environment_variables import environment_variables -class DateTimeEncoder(JSONEncoder): - def default(self, o): - if isinstance(o, datetime): - return o.isoformat() - return super(DateTimeEncoder, self).default(o) - - class CloudMonitor: """ - This class run the short run & long run monitoring + This class run CRO Monitoring """ + AWS = "AWS" + GCP = "GCP" + AZURE = "AZURE" + IBM = "IBM" + def __init__(self): self.__environment_variables_dict = environment_variables.environment_variables_dict self.__region = self.__environment_variables_dict.get('AWS_DEFAULT_REGION', '') - self.__cloud_name = self.__environment_variables_dict.get('CLOUD_NAME') + self.__cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') self.__monitor = self.__environment_variables_dict.get('MONITOR') - self._cloud_trail = CloudTrailOperations(self.__region) - self.__current_date_time = datetime.utcnow().replace(microsecond=0, tzinfo=pytz.utc) - environment_variables._environment_variables_dict['TRAILS_SNAPSHOT_TIME'] = self.__current_date_time - - def __write_cloudtrail_logs(self): - """ - This method fetched the cloudtrail logs per day - """ - end_time = self.__current_date_time - start_time = end_time - timedelta(days=1) - 
cloud_trail_logs = [] - cloud_trail_logs.extend(self._cloud_trail.get_full_responses(StartTime=start_time, EndTime=end_time, LookupAttributes=[{'AttributeKey': 'EventName', 'AttributeValue': 'StopInstances'}], MaxResults=123)) - cloud_trail_logs.extend(self._cloud_trail.get_full_responses(StartTime=start_time, EndTime=end_time, LookupAttributes=[{'AttributeKey': 'EventName', 'AttributeValue': 'StartInstances'}], MaxResults=123)) - cloud_trail_logs.extend(self._cloud_trail.get_full_responses(StartTime=start_time, EndTime=end_time, LookupAttributes=[{'AttributeKey': 'EventName', 'AttributeValue': 'TerminateInstances'}], MaxResults=123)) - json_data = json.dumps(cloud_trail_logs, cls=DateTimeEncoder) - path = f'/tmp/{end_time.date()}.json' - if os.path.exists(path): - os.remove(path) - logger.info(f"Deleted the file path:{path}") - with open(path, 'w') as file: - file.write(json_data) - logger.info(path) - - def __delete_cloudtrail_logs(self): - """ - This method delete the file of cloudtrail logs - """ - file_path = f'/tmp/{self.__current_date_time}.json' - if os.path.exists(file_path): - os.remove(file_path) - logger.info(f"Deleted the file path:{file_path}") + self.__account = self.__environment_variables_dict.get('account') + self.__run_cro = RunCRO() @logger_time_stamp def aws_cloud_monitor(self): """ - This method ture if the cloud name is + This method runs the AWS monitor if the cloud name is AWS """ - self.__write_cloudtrail_logs() - if self.__monitor == 'long_run': - ec2_long_run = EC2LongRun() - ec2_long_run.run() - self.__delete_cloudtrail_logs() + self.__run_cro.run() @logger_time_stamp def run_cloud_monitor(self): """ - This verify the cloud and run the monitor + This method runs the public cloud monitor """ - if self.__cloud_name.upper() == "AWS".upper(): - logger.info(f'Account = {self.__environment_variables_dict.get("account")}, Region = {self.__region}, Monitoring = {self.__monitor}') + + if self.__cloud_name.upper() == self.AWS: + logger.info(f'CLOUD_RESOURCE_ORCHESTRATION = True, PublicCloudName = {self.__cloud_name}, Account = {self.__account}') self.aws_cloud_monitor() def run(self): diff --git a/tests/unittest/cloud_resource_orchestration/aws/__init__.py b/cloud_governance/cloud_resource_orchestration/utils/__init__.py similarity index 100% rename from tests/unittest/cloud_resource_orchestration/aws/__init__.py rename to cloud_governance/cloud_resource_orchestration/utils/__init__.py diff --git a/cloud_governance/cloud_resource_orchestration/utils/common_operations.py b/cloud_governance/cloud_resource_orchestration/utils/common_operations.py new file mode 100644 index 00000000..06d71200 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/utils/common_operations.py @@ -0,0 +1,42 @@ + + +def string_equal_ignore_case(value1: str, value2: str, *args) -> bool: + """ + This method checks whether all the values are equal, ignoring case, and returns a bool + :return: + """ + equal = value1.lower() == value2.lower() + if args: + for val in args: + equal = value1.lower() == val.lower() and equal + return equal + + +def integer_equal(value1: int, value2: int, *args) -> bool: + """ + This method checks whether all the values are equal and returns a bool + :param value1: + :param value2: + :param args: + :return: + """ + equal = value1 == value2 + if args: + for val in args: + equal = equal and value1 == val + return equal + + +def get_tag_value_by_name(tags: list, tag_name: str) -> str: + """ + This method returns the tag_value + :param tags: + :param tag_name: + :return: + """ + for tag in tags: + key = tag.get('Key') + value = tag.get('Value')
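+ # Hedged usage sketch (hypothetical values, not part of the change): with tags=[{'Key': 'TicketId', 'Value': '123'}], + # get_tag_value_by_name(tags=tags, tag_name='TicketId') is expected to return '123', and + # string_equal_ignore_case('AWS', 'aws') returns True. + if 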
string_equal_ignore_case(key, tag_name): + return value + return '' diff --git a/cloud_governance/cloud_resource_orchestration/utils/constant_variables.py b/cloud_governance/cloud_resource_orchestration/utils/constant_variables.py new file mode 100644 index 00000000..4e80b199 --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/utils/constant_variables.py @@ -0,0 +1,14 @@ + +AWS: str = "AWS" +GCP: str = "GCP" +AZURE: str = "AZURE" +IBM: str = "IBM" +JIRA_ISSUE_NEW_STATE: str = 'New' +JIRA_ISSUE_REFINEMENT_STATE: str = 'Refinement' +JIRA_ISSUE_CLOSED_STATE: str = 'Closed' +JIRA_ISSUE_IN_PROGRESS_STATE: str = 'In Progress' +FIRST_CRO_ALERT: int = 5 +SECOND_CRO_ALERT: int = 3 +CLOSE_JIRA_TICKET: int = 0 +DEFAULT_ROUND_DIGITS: int = 3 +DATE_FORMAT: str = '%Y-%m-%d' diff --git a/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py b/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py new file mode 100644 index 00000000..98a4534f --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py @@ -0,0 +1,46 @@ + +from datetime import datetime, timedelta + + +class ElasticSearchQueries: + """ + This class holds the ElasticSearch queries used in the cloud-resource-orchestration + """ + + def __init__(self, cro_duration_days: int = 30): + self.current_end_date = datetime.utcnow() + self.current_start_date = self.current_end_date - timedelta(days=cro_duration_days) + + def get_all_in_progress_tickets(self, match_conditions: list = None, fields: list = None, **kwargs): + """ + This method returns all the in-progress tickets + :param fields: + :param match_conditions: + :param kwargs: + :return: + """ + if not match_conditions: + match_conditions = [] + query = \ + { # check user opened the ticket in elastic_search + "query": { + "bool": { + "must": [ + {"terms": {"ticket_id_state.keyword": ['in-progress']}}, + *match_conditions, + ], + "filter": { + "range": { + "timestamp": { + "format": "yyyy-MM-dd", + "lte": str(self.current_end_date.date()), + "gte": str(self.current_start_date.date()), + } + } + } + } + }, + } + if fields: + query['_source'] = fields + return query diff --git a/cloud_governance/cloud_resource_orchestration/utils/es_schema.json b/cloud_governance/cloud_resource_orchestration/utils/es_schema.json new file mode 100644 index 00000000..7dcc8d9e --- /dev/null +++ b/cloud_governance/cloud_resource_orchestration/utils/es_schema.json @@ -0,0 +1,34 @@ +{ + "_index": "cloud-governance-resource-orchestration", + "_type": "_doc", + "_id": "#", + "_source": { + "cloud_name": "string", + "account_name": "string", + "region_name": "string", + "user": "string", + "user_cro": "string", + "actual_cost": "float", + "ticket_id": "int", + "ticket_id_state": "string", + "estimated_cost": "float", + "total_instances": "int", + "monitored_days": "int", + "ticket_opened_date": "date", + "duration": "int", + "approved_manager": "string", + "user_manager": "string", + "project": "string", + "owner": "string", + "total_spots": "int", + "total_ondemand": "int", + "AllocatedBudget": "int", + "instances": ["Name, Id, OnDemand/Spot, MachineType, State, RunningDays"], + "instance_types": ["m6a.2xlarge"], + "forecast": "float", + "timestamp": "2023-07-25T04:03:33.287478", + "policy": "cloud_resource_orchestration", + "TotalCurrentUsage-2023": "float", + "user_daily_cost": "{'Date': {'TicketId': 'TicketCost', 'User': 'ActualUserCost'}}" + } +}
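+# A hedged usage sketch (not part of the change): the in-progress query above can be built as +# query = ElasticSearchQueries(cro_duration_days=30).get_all_in_progress_tickets(fields=['ticket_id', 'user_cro']) +# and run against the 'cloud-governance-resource-orchestration' index whose document shape is sketched in es_schema.json above. diff --git a/tests/unittest/cloud_resource_orchestration/common/__init__.py 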
b/cloud_governance/common/clouds/aws/athena/__init__.py similarity index 100% rename from tests/unittest/cloud_resource_orchestration/common/__init__.py rename to cloud_governance/common/clouds/aws/athena/__init__.py diff --git a/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py b/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py new file mode 100644 index 00000000..83a22c09 --- /dev/null +++ b/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py @@ -0,0 +1,32 @@ +from abc import ABC, abstractmethod +from datetime import datetime + +from cloud_governance.main.environment_variables import environment_variables + + +class AbstractAthenaOperations(ABC): + + CURRENT_DATE = str(datetime.utcnow().date()).replace("-", "") + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self._s3_results_path = self.__environment_variables_dict.get('S3_RESULTS_PATH') + super().__init__() + + def _get_s3_path(self): + """ + This method returns the s3 path to dump athena results + :return: + """ + s3_path = f"{self._s3_results_path}/{self.CURRENT_DATE}" + return s3_path + + @abstractmethod + def execute_query(self, query_string: str): + """ + This method executes the query in aws athena + :param query_string: + :return: + """ + raise NotImplementedError() + diff --git a/cloud_governance/common/clouds/aws/athena/boto3_client_athena_operations.py b/cloud_governance/common/clouds/aws/athena/boto3_client_athena_operations.py new file mode 100644 index 00000000..c180526b --- /dev/null +++ b/cloud_governance/common/clouds/aws/athena/boto3_client_athena_operations.py @@ -0,0 +1,48 @@ +import boto3 +import typeguard + +from cloud_governance.common.clouds.aws.athena.abstract_athena_operations import AbstractAthenaOperations +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp + + +class BotoClientAthenaOperations(AbstractAthenaOperations): + """ + This class performs the aws athena operations + """ + + def __init__(self): + super().__init__() + self.__athena_client = boto3.client('athena', region_name='us-east-1') + + @typeguard.typechecked + @logger_time_stamp + def execute_query(self, query_string: str): + """ + This method executes the query and returns the s3_path, QueryExecutionId + Limit: the caller must poll the s3 bucket using the QueryExecutionId to check that the result file is created + :param query_string: + :return: + """ + try: + s3_path = self._get_s3_path() + logger.debug(f"Query Output path: {s3_path}") + result = self.__athena_client.start_query_execution( + QueryString=query_string, + ResultConfiguration={ + "OutputLocation": s3_path, + } + ) + if result: + bucket, key = self._s3_results_path.replace("s3://", "").split('/') + return { + 'QueryExecutionId': result.get('QueryExecutionId'), + 's3_key': s3_path, + 's3_csv_path': f'{s3_path}/{result.get("QueryExecutionId")}.csv', + 'bucket': bucket, + 'key': f'{key}/{self.CURRENT_DATE}', + 'file_name': f'{result.get("QueryExecutionId")}.csv' + } + except Exception as err: + logger.error(err) + return {} diff --git a/cloud_governance/common/clouds/aws/athena/pyathena_operations.py b/cloud_governance/common/clouds/aws/athena/pyathena_operations.py new file mode 100644 index 00000000..e46b3b33 --- /dev/null +++ b/cloud_governance/common/clouds/aws/athena/pyathena_operations.py @@ -0,0 +1,28 @@ +import typeguard +from pyathena import connect
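+# A hedged usage sketch for the Athena helpers, not part of the change (assumes S3_RESULTS_PATH is set, +# e.g. 's3://my-bucket/athena-results', and 'my_db.my_table' is a placeholder): +# rows = PyAthenaOperations().execute_query("SELECT * FROM my_db.my_table LIMIT 10")  # list of dicts, one per row +# meta = BotoClientAthenaOperations().execute_query("SELECT ...")  # returns the QueryExecutionId and the s3 csv path to poll +from 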
pyathena.pandas.cursor import PandasCursor + +from cloud_governance.common.clouds.aws.athena.abstract_athena_operations import AbstractAthenaOperations +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp + + +class PyAthenaOperations(AbstractAthenaOperations): + + def __init__(self): + super().__init__() + self.conn = connect(s3_staging_dir=self._get_s3_path(), region_name="us-east-1", + cursor_class=PandasCursor).cursor() + + @typeguard.typechecked + @logger_time_stamp + def execute_query(self, query_string: str) -> list: + """ + This method returns list of dicts of athena results + :param query_string: + :return: + """ + try: + result_set = self.conn.execute(query_string).as_pandas() + return result_set.to_dict(orient='records') + except Exception as err: + raise err diff --git a/cloud_governance/common/clouds/aws/cloudtrail/cloudtrail_operations.py b/cloud_governance/common/clouds/aws/cloudtrail/cloudtrail_operations.py index 6e0615dd..6f68d5ba 100644 --- a/cloud_governance/common/clouds/aws/cloudtrail/cloudtrail_operations.py +++ b/cloud_governance/common/clouds/aws/cloudtrail/cloudtrail_operations.py @@ -194,16 +194,18 @@ def __get_time_difference(self, start_time: datetime): diff = (current_time - start_time.replace(tzinfo=None)) return (diff.days * 24 * 60 * 60) + diff.seconds - def get_username_by_instance_id_and_time(self, resource_id: str, resource_type: str, start_time: datetime = '', event_type: str = 'ResourceType'): + def get_username_by_instance_id_and_time(self, resource_id: str, resource_type: str, start_time: datetime = '', event_type: str = 'ResourceType', end_time: datetime = None): """ This method find Username in cloud trail events according to start_time and resource_id @param event_type: @param start_time: @param resource_id: @param resource_type: + @param end_time: @return: if user not found it return empty string + """ - if start_time: + if start_time and not end_time: delay_seconds = int(os.environ.get('SLEEP_SECONDS', self.SLEEP_SECONDS)) if self.__get_time_difference(start_time=start_time) <= delay_seconds: time.sleep(delay_seconds) @@ -211,8 +213,9 @@ def get_username_by_instance_id_and_time(self, resource_id: str, resource_type: end_time = start_time + search_time start_time = start_time - search_time else: - start_time = datetime.now() - timedelta(days=self.LOOKBACK_DAYS) - end_time = datetime.now() + if not start_time and not end_time: + start_time = datetime.now() - timedelta(days=self.LOOKBACK_DAYS) + end_time = datetime.now() username, event = self.__get_user_by_resource_id(start_time, end_time, resource_id, resource_type, event_type) return self.__check_filter_username(username, event) diff --git a/cloud_governance/common/clouds/aws/cost_explorer/cost_explorer_operations.py b/cloud_governance/common/clouds/aws/cost_explorer/cost_explorer_operations.py index 5321aa82..e01ed0b0 100644 --- a/cloud_governance/common/clouds/aws/cost_explorer/cost_explorer_operations.py +++ b/cloud_governance/common/clouds/aws/cost_explorer/cost_explorer_operations.py @@ -2,6 +2,8 @@ import boto3 +from cloud_governance.common.logger.init_logger import logger + class CostExplorerOperations: """ @@ -10,11 +12,106 @@ class CostExplorerOperations: START_DAY = 1 END_DAY = 31 + DIMENSIONS = 'Dimensions' + KEY = 'Key' + VALUES = 'Values' + PURCHASE_TYPE = 'PURCHASE_TYPE' + SPOT_INSTANCES = 'Spot Instances' + FILTER = 'Filter' + PURCHASE_OPTIONS = ['On Demand Instances', 'Savings Plans', SPOT_INSTANCES, 'Standard Reserved Instances'] + CE_COST_TYPES = 
{'CHARGETYPE': 'RECORD_TYPE', 'PURCHASETYPE': PURCHASE_TYPE} + CE_FILTER_TEMPLATE = { + DIMENSIONS: { + KEY: '', + VALUES: [] + } + } + CE_COST_FILTERS = {'SPOT': {KEY: PURCHASE_TYPE, VALUES: [SPOT_INSTANCES]}} - def __init__(self, ce_client = ''): + def __init__(self, ce_client=''): self.cost_explorer_client = boto3.client('ce') if not ce_client else ce_client - def get_cost_by_tags(self, tag: str, granularity: str = 'DAILY', cost_metric: str = 'UnblendedCost', start_date: str = '', end_date: str = ''): + def __get_ce_tag_filters(self, tag: str, ce_default_filter: dict): + """ + This method returns the ce filter values + :return: + """ + if ':' in tag: + tag, tag_filter = tag.split(':') + tag_filter = tag_filter.upper() + ce_filter = self.CE_FILTER_TEMPLATE + if tag_filter in self.CE_COST_FILTERS: + ce_filter[self.DIMENSIONS][self.KEY] = self.CE_COST_FILTERS[tag_filter][self.KEY] + ce_filter[self.DIMENSIONS][self.VALUES] = self.CE_COST_FILTERS[tag_filter][self.VALUES] + if ce_default_filter: + ce_default_filter = { + 'And': [ + ce_default_filter, + ce_filter + ] + } + else: + ce_default_filter = ce_filter + return ce_default_filter + + def get_ce_report_filter_data(self, ce_response: dict, tag_name: str): + """ + This method returns data filtered by tag_name + :param tag_name: + :param ce_response: + :return: + """ + data = {} + if ce_response.get('ResultsByTime'): + for results_by_time in ce_response.get('ResultsByTime'): + start_time = results_by_time.get('TimePeriod').get('Start') + for group in results_by_time.get('Groups'): + name = group.get('Keys')[0].split('$')[-1].strip().replace(' ', '-') if group.get('Keys') else '' + amount = group.get('Metrics').get('UnblendedCost').get('Amount') if group.get('Metrics') else 0 + index_id = "%s-%s" % (start_time, name) + data[index_id] = { + tag_name: amount, + 'ce_match_id': index_id, + 'start_date': start_time + } + if ce_response.get('DimensionValueAttributes'): + for dimension_values in ce_response.get('DimensionValueAttributes'): + account_id = dimension_values.get("Value") + account = dimension_values.get('Attributes').get('description') + for key_index_id in data.keys(): + if account_id in key_index_id: + index_id = f'{data[key_index_id]["start_date"]}-{account}'.lower() + data[key_index_id].update({'Account': account, 'index_id': index_id}) + return data + + def get_filter_data(self, ce_data: list, tag_name: str = '', group_by: bool = True): + """ + This method filters the cost_explorer_data + :param group_by: + :param tag_name: + :param ce_data: + :return: + """ + user_cost_response = {} + if group_by: + for data in ce_data: + for cost_group in data.get('Groups'): + user = cost_group.get('Keys')[0].split('$')[-1] + user = user if user else 'REFUND' + user_cost = float(cost_group.get('Metrics').get('UnblendedCost').get('Amount')) + if user in user_cost_response: + user_cost_response[user]['Cost'] += user_cost + else: + user_cost_response[user] = {tag_name: user, 'Cost': user_cost} + return list(user_cost_response.values()) + else: + total_cost = 0 + for data in ce_data: + total_cost += float(data.get('Total').get('UnblendedCost').get('Amount')) + return total_cost + + def get_cost_by_tags(self, tag: str, granularity: str = 'DAILY', cost_metric: str = 'UnblendedCost', + start_date: str = '', end_date: str = '', **kwargs): """ This method extracts the price by Tag provided @return: @@ -24,12 +121,18 @@ def get_cost_by_tags(self, tag: str, granularity: str = 'DAILY', cost_metric: st start_date = end_date - timedelta(self.END_DAY)
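+ # Hypothetical call (not part of the change): get_cost_by_tags(tag='User:Spot') groups the daily cost by + # the 'User' tag and filters it to Spot Instances through the PURCHASE_TYPE dimension; a plain tag such + # as 'User' applies no extra filter. start_date 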
= str(start_date.strftime('%Y-%m-%d')) end_date = str(end_date.strftime('%Y-%m-%d')) - if tag.upper() == 'ChargeType'.upper(): - return self.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity=granularity, GroupBy=[{'Type': 'DIMENSION', 'Key': 'RECORD_TYPE'}]) + if tag.upper() in self.CE_COST_TYPES: + return self.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity=granularity, + GroupBy=[{'Type': 'DIMENSION', 'Key': self.CE_COST_TYPES[tag.upper()]}], **kwargs) else: - return self.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity=granularity, cost_metric=cost_metric, GroupBy=[{'Type': 'TAG', 'Key': tag}]) + kwargs[self.FILTER] = self.__get_ce_tag_filters(tag=tag, ce_default_filter=kwargs.get(self.FILTER)) + if ':' in tag: + tag, tag_filter = tag.split(':') + return self.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity=granularity, + cost_metric=cost_metric, GroupBy=[{'Type': 'TAG', 'Key': tag}], **kwargs) - def get_cost_and_usage_from_aws(self, start_date: str, end_date: str, granularity: str = 'DAILY', cost_metric: str = 'UnblendedCost', **kwargs): + def get_cost_and_usage_from_aws(self, start_date: str, end_date: str, granularity: str = 'DAILY', + cost_metric: str = 'UnblendedCost', **kwargs): """ This method returns the cost and usage reports @param start_date: @@ -39,25 +142,51 @@ def get_cost_and_usage_from_aws(self, start_date: str, end_date: str, granularit @param kwargs: @return: """ - return self.cost_explorer_client.get_cost_and_usage(TimePeriod={ - 'Start': start_date, - 'End': end_date - }, Granularity=granularity, Metrics=[cost_metric], **kwargs) + try: + if self.FILTER in kwargs and not kwargs.get('Filter'): + kwargs.pop('Filter') + usage_cost = {} + response = self.cost_explorer_client.get_cost_and_usage(TimePeriod={ + 'Start': start_date, + 'End': end_date + }, Granularity=granularity, Metrics=[cost_metric], **kwargs) + usage_cost['GroupDefinitions'] = response.get('GroupDefinitions') + usage_cost['ResultsByTime'] = response.get('ResultsByTime') + usage_cost['DimensionValueAttributes'] = response.get('DimensionValueAttributes') + while response.get('NextPageToken'): + response = self.cost_explorer_client.get_cost_and_usage(TimePeriod={ + 'Start': start_date, + 'End': end_date + }, Granularity=granularity, Metrics=[cost_metric], NextPageToken=response.get('NextPageToken'), **kwargs) + usage_cost['ResultsByTime'].extend(response.get('ResultsByTime')) + usage_cost['DimensionValueAttributes'].extend(response.get('DimensionValueAttributes')) + return usage_cost + except Exception as err: + logger.error(err) + return { + 'ResultsByTime': [], + 'DimensionValueAttributes': [], + 'GroupDefinitions': [], + } def get_cost_forecast(self, start_date: str, end_date: str, granularity: str, cost_metric: str, **kwargs): """ - This method return the cost forecasting + This method returns the cost forecasting @param start_date: @param end_date: @param granularity: @param cost_metric: @return: """ - return self.cost_explorer_client.get_cost_forecast( - TimePeriod={ - 'Start': start_date, - 'End': end_date - }, - Granularity=granularity, - Metric=cost_metric, **kwargs - ) + try: + return self.cost_explorer_client.get_cost_forecast( + TimePeriod={ + 'Start': start_date, + 'End': end_date + }, + Granularity=granularity, + Metric=cost_metric, **kwargs + ) + except Exception as err: + logger.error(err) + return {'Total': {'Amount': 0}, 'ForecastResultsByTime': []} diff --git 
a/cloud_governance/common/clouds/aws/ec2/ec2_operations.py b/cloud_governance/common/clouds/aws/ec2/ec2_operations.py index 8e63cf57..735e7459 100644 --- a/cloud_governance/common/clouds/aws/ec2/ec2_operations.py +++ b/cloud_governance/common/clouds/aws/ec2/ec2_operations.py @@ -2,6 +2,7 @@ import boto3 import typeguard +from typing import Callable from cloud_governance.common.clouds.aws.utils.utils import Utils from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp @@ -13,6 +14,8 @@ class EC2Operations: This class is useful for writing EC2 Operations """ + TAG_BATCHES = 20 + def __init__(self, region: str = 'us-east-2'): """ Initializing the AWS resources @@ -320,12 +323,13 @@ def scan_cluster_non_cluster_resources(self, resources: list, tags: str = 'Tags' non_cluster.append(resource) return [cluster, non_cluster] - def get_instances(self, **kwargs): + def get_instances(self, ec2_client: boto3.client = None, **kwargs): """ This method returns all instances from the region @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_instances, + describe_instances = ec2_client.describe_instances if ec2_client else self.ec2_client.describe_instances + return self.utils.get_details_resource_list(func_name=describe_instances, input_tag='Reservations', check_tag='NextToken', **kwargs) def get_volumes(self, **kwargs): @@ -355,91 +359,104 @@ def get_security_groups(self): This method returns security groups in the region @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_security_groups, input_tag='SecurityGroups', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_security_groups, + input_tag='SecurityGroups', check_tag='NextToken') def get_elastic_ips(self): """ This method returns elastic_ips in the region @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_addresses, input_tag='Addresses', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_addresses, input_tag='Addresses', + check_tag='NextToken') def get_network_interface(self): """ This method returns network_interface in the region @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_network_interfaces, input_tag='NetworkInterfaces', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_network_interfaces, + input_tag='NetworkInterfaces', check_tag='NextToken') def get_load_balancers(self): """ This method returns load balancers in the region @return: """ - return self.utils.get_details_resource_list(func_name=self.elb1_client.describe_load_balancers, input_tag='LoadBalancerDescriptions', check_tag='Marker') + return self.utils.get_details_resource_list(func_name=self.elb1_client.describe_load_balancers, + input_tag='LoadBalancerDescriptions', check_tag='Marker') def get_load_balancers_v2(self): """ This method returns load balancers v2 in the region @return: """ - return self.utils.get_details_resource_list(func_name=self.elbv2_client.describe_load_balancers, input_tag='LoadBalancers', check_tag='Marker') + return self.utils.get_details_resource_list(func_name=self.elbv2_client.describe_load_balancers, + input_tag='LoadBalancers', check_tag='Marker') def get_vpcs(self): """ This method returns all vpcs @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_vpcs, 
input_tag='Vpcs', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_vpcs, input_tag='Vpcs', + check_tag='NextToken') def get_subnets(self): """ This method returns all subnets @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_subnets, input_tag='Subnets', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_subnets, input_tag='Subnets', + check_tag='NextToken') def get_route_tables(self): """ This method returns all route tables @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_route_tables, input_tag='RouteTables', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_route_tables, + input_tag='RouteTables', check_tag='NextToken') def get_internet_gateways(self): """ This method returns all internet gateways @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_internet_gateways, input_tag='InternetGateways', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_internet_gateways, + input_tag='InternetGateways', check_tag='NextToken') def get_dhcp_options(self): """ This method returns all dhcp options @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_dhcp_options, input_tag='DhcpOptions', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_dhcp_options, + input_tag='DhcpOptions', check_tag='NextToken') def get_vpce(self): """ This method returns all vpc endpoints @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_vpc_endpoints, input_tag='VpcEndpoints', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_vpc_endpoints, + input_tag='VpcEndpoints', check_tag='NextToken') def get_nat_gateways(self): """ This method returns all nat gateways @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_nat_gateways, input_tag='NatGateways', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_nat_gateways, + input_tag='NatGateways', check_tag='NextToken') def get_nacls(self): """ This method returns all network acls @return: """ - return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_network_acls, input_tag='NetworkAcls', check_tag='NextToken') + return self.utils.get_details_resource_list(func_name=self.ec2_client.describe_network_acls, + input_tag='NetworkAcls', check_tag='NextToken') def is_cluster_resource(self, resource_id: str): """ @@ -505,14 +522,137 @@ def get_global_ec2_list_by_user(self): users_list[user] = [user_data] return users_list - def get_tag_value_from_tags(self, tags: list, tag_name: str) -> str: + def get_tag_value_from_tags(self, tags: list, tag_name: str, cast_type: str = 'str', + default_value: any = '') -> any: """ This method return the tag value inputted by tag_name """ if tags: for tag in tags: - key = tag.get('Key').lower().replace("_", '').replace("-", '') + key = tag.get('Key').lower().replace("_", '').replace("-", '').strip() if key == tag_name.lower(): - return tag.get('Value') + if cast_type: + if cast_type == 'int': + return int(tag.get('Value').strip()) + elif cast_type == 'float': + return float(tag.get('Value').strip()) + else: + 
return str(tag.get('Value').strip()) + return tag.get('Value').strip() + return default_value + + def get_active_regions(self): + """ + This method returns the active regions in the aws account + :return: + """ + responses = self.ec2_client.describe_regions()['Regions'] + active_regions = [] + for region in responses: + active_regions.append(region.get('RegionName')) + return active_regions + + def get_ec2_instance_list(self, **kwargs): + """ + This method returns the list of instances + :param kwargs: + :return: + """ + instances_list = [] + ignore_tag = kwargs.pop('ignore_tag', None) + instances = self.get_instances(**kwargs) + for instance in instances: + for resource in instance['Instances']: + skip_resource = False + if ignore_tag: + for tag in resource.get('Tags', []): + if tag.get('Key') == ignore_tag: + skip_resource = True + break + if not skip_resource: + instances_list.append(resource) + return instances_list + + def get_ec2_instance_ids(self, **kwargs): + """ + This method returns the ec2 instance ids + :param kwargs: + :return: + """ + instances = self.get_ec2_instance_list(**kwargs) + instance_ids = [] + for instance in instances: + instance_ids.append(instance.get('InstanceId')) + return instance_ids + + def tag_ec2_resources(self, client_method: Callable, tags: list, resource_ids: list): + """ + This method tags the ec2 resources in batches of TAG_BATCHES (20) + :param client_method: + :param tags: + :param resource_ids: + :return: + """ + co = 0 + for start in range(0, len(resource_ids), self.TAG_BATCHES): + end = start + self.TAG_BATCHES + client_method(Resources=resource_ids[start:end], Tags=tags) + co += 1 + return co + + def get_attached_time(self, volume_list: list): + """ + This method returns the root volume attached time + :param volume_list: + :return: + """ + for mapping in volume_list: + if mapping.get('Ebs').get('DeleteOnTermination'): + return mapping.get('Ebs').get('AttachTime') return '' + def get_active_instances(self, tag_name: str, tag_value: str, skip_full_scan: bool = False, ignore_tag: str = ''): + """ + This method returns all active instances filtered by tag_name, tag_value in all active regions + :param ignore_tag: + :param skip_full_scan: + :param tag_name: + :param tag_value: + :return: + """ + active_instances = {} + active_regions = self.get_active_regions() + for region_name in active_regions[::-1]: + filters = [{'Name': f'tag:{tag_name}', 'Values': [tag_value, tag_value.upper(), tag_value.lower(), tag_value.title()]}] + active_instances_in_region = self.get_ec2_instance_list(Filters=filters, ec2_client=boto3.client('ec2', region_name=region_name), ignore_tag=ignore_tag) + if active_instances_in_region: + if skip_full_scan: + return True + else: + active_instances[region_name] = active_instances_in_region + return False if skip_full_scan else active_instances + + def verify_active_instances(self, tag_name: str, tag_value: str): + """ + This method verifies whether there are any active instances in any region with the given tag_name, tag_value + :param tag_name: + :param tag_value: + :return: + """ + ignore_tag = 'TicketId' + return self.get_active_instances(tag_name=tag_name, tag_value=tag_value, skip_full_scan=True, ignore_tag=ignore_tag) + + def describe_tags(self, **kwargs): + """ + This method returns all the tags in the aws region + :param kwargs: + :return: + """ + tags_list = [] + ec2_service_tags = self.ec2_client.describe_tags(**kwargs) + tags_list.extend(ec2_service_tags.get('Tags', [])) + while ec2_service_tags.get('NextToken'):
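+ # Hedged examples (hypothetical tag values, not part of the change): get_tag_value_from_tags(tags, 'Duration', cast_type='int') + # casts the matched value to int, and verify_active_instances(tag_name='User', tag_value='john') returns True as soon as + # any region has a matching instance, since skip_full_scan short-circuits the region scan. + ec2_service_tags = 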
self.ec2_client.describe_tags(NextToken=ec2_service_tags.get('NextToken'), **kwargs) + tags_list.extend(ec2_service_tags.get('Tags', [])) + return tags_list diff --git a/cloud_governance/common/clouds/aws/iam/iam_operations.py b/cloud_governance/common/clouds/aws/iam/iam_operations.py index ff88b4e6..de635d13 100644 --- a/cloud_governance/common/clouds/aws/iam/iam_operations.py +++ b/cloud_governance/common/clouds/aws/iam/iam_operations.py @@ -7,9 +7,10 @@ class IAMOperations: - def __init__(self): - self.iam_client = boto3.client('iam') + def __init__(self, iam_client=None): + self.iam_client = iam_client if iam_client else boto3.client('iam') self.utils = Utils() + self.__sts_client = boto3.client('sts') def get_user_tags(self, username: str): """ @@ -45,7 +46,29 @@ def get_account_alias_cloud_name(self): This method returns the aws account alias and cloud name @return: """ - account_alias = self.iam_client.list_account_aliases()['AccountAliases'] - if account_alias: - return account_alias[0].upper(), 'AwsCloud'.upper() - return os.environ.get('account', '').upper(), 'AwsCloud'.upper() + try: + account_alias = self.iam_client.list_account_aliases()['AccountAliases'] + if account_alias: + return account_alias[0].upper(), 'AwsCloud'.upper() + except Exception: + pass + return os.environ.get('account', '').upper(), 'AwsCloud'.upper() + + def get_iam_users_list(self): + """ + This method returns the IAM users list + :return: + """ + iam_users = [] + users = self.get_users() + for user in users: + iam_users.append(user.get('UserName')) + return iam_users + + def get_aws_account_id_name(self): + """ + This method returns the aws account_id + :return: + """ + response = self.__sts_client.get_caller_identity() + account_id = response['Account'] + return account_id diff --git a/tests/unittest/cloud_resource_orchestration/mocks/__init__.py b/cloud_governance/common/clouds/aws/resource_tagging_api/__init__.py similarity index 100% rename from tests/unittest/cloud_resource_orchestration/mocks/__init__.py rename to cloud_governance/common/clouds/aws/resource_tagging_api/__init__.py diff --git a/cloud_governance/common/clouds/aws/resource_tagging_api/resource_tag_api_operations.py b/cloud_governance/common/clouds/aws/resource_tagging_api/resource_tag_api_operations.py new file mode 100644 index 00000000..ad4bffb9 --- /dev/null +++ b/cloud_governance/common/clouds/aws/resource_tagging_api/resource_tag_api_operations.py @@ -0,0 +1,48 @@ +import boto3 + +from cloud_governance.common.logger.init_logger import logger + + +class ResourceTagAPIOperations: + """ + This class performs the resourcegroupstaggingapi operations + """ + + PAGINATION_TOKEN = 'PaginationToken' + + def __init__(self, region_name: str): + self.__client = boto3.client('resourcegroupstaggingapi', region_name=region_name) + + def get_resources(self, tag_name: str, tag_value: str = ''): + """ + This method returns all the resources having the tag_name and tag_value + :return: + """ + resources_list = [] + filters = {'Key': tag_name, 'Values': []} + if tag_value: + filters['Values'].append(tag_value) + try: + response = self.__client.get_resources(TagFilters=[filters]) + resources_list.extend(response['ResourceTagMappingList']) + while response.get(self.PAGINATION_TOKEN): + response = self.__client.get_resources(TagFilters=[filters], + PaginationToken=response.get(self.PAGINATION_TOKEN)) + resources_list.extend(response['ResourceTagMappingList']) + except Exception as err: + logger.error(err) + raise err + return resources_list + + def 
tag_resources(self, resource_arn_list: list, update_tags_dict: dict): + """ + This method applies the given tags to the given resource ARNs + :param resource_arn_list: + :param update_tags_dict: {key: value} + :return: + """ + try: + self.__client.tag_resources(ResourceARNList=resource_arn_list, Tags=update_tags_dict) + except Exception as err: + logger.error(err) + raise err diff --git a/cloud_governance/common/clouds/aws/s3/s3_operations.py b/cloud_governance/common/clouds/aws/s3/s3_operations.py index b8a7bc1d..f25731ba 100644 --- a/cloud_governance/common/clouds/aws/s3/s3_operations.py +++ b/cloud_governance/common/clouds/aws/s3/s3_operations.py @@ -226,20 +226,22 @@ def download_objects(self, s3_target: str, local_source: str): @logger_time_stamp @typeguard.typechecked - def get_last_objects(self, bucket: str, logs_bucket_key: str, policy: str): + def get_last_objects(self, bucket: str, logs_bucket_key: str = '', policy: str = '', key_prefix: str = ''): """ This method return last object per policy, only path without file name @param logs_bucket_key: @param bucket: @param policy: + @param key_prefix: @return: """ try: - if '_' in policy: - policy = policy.replace('_', '-') - date_key = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y/%m/%d") - objs = self.__s3_client.list_objects_v2(Bucket=bucket, - Prefix=f'{logs_bucket_key}/{policy}/{date_key}')['Contents'] + if not key_prefix: + if '_' in policy: + policy = policy.replace('_', '-') + date_key = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y/%m/%d") + key_prefix = f'{logs_bucket_key}/{policy}/{date_key}' + objs = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key_prefix)['Contents'] except: return None get_last_modified_key = lambda obj: int(obj['LastModified'].strftime('%s')) @@ -302,20 +304,21 @@ def __get_s3_latest_policy_file(self, policy: str): logs_bucket_key=f'{self.__logs_bucket_key}/{self.__region}', policy=policy) - def get_last_s3_policy_content(self, policy: str, file_name: str): + def get_last_s3_policy_content(self, policy: str = '', file_name: str = '', s3_file_path: str = None): """ This method return last policy content @return: """ with tempfile.TemporaryDirectory() as temp_local_directory: local_file = temp_local_directory + '/' + file_name + '.gz' - if self.__get_s3_latest_policy_file(policy=policy): - latest_policy_path = self.__get_s3_latest_policy_file(policy=policy) - self.download_file(bucket=self.__bucket, - key=str(latest_policy_path), - download_file=file_name + '.gz', - file_name_path=local_file) - # gzip - os.system(f"gzip -d {local_file}") - with open(os.path.join(temp_local_directory, file_name)) as f: - return f.read() + if not s3_file_path: + if self.__get_s3_latest_policy_file(policy=policy): + s3_file_path = self.__get_s3_latest_policy_file(policy=policy) + self.download_file(bucket=self.__bucket, + key=str(s3_file_path), + download_file=file_name + '.gz', + file_name_path=local_file) + # gzip + os.system(f"gzip -d {local_file}") + with open(os.path.join(temp_local_directory, file_name)) as f: + return f.read() diff --git a/cloud_governance/common/clouds/aws/savingsplan/__init__.py b/cloud_governance/common/clouds/aws/savingsplan/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/common/clouds/aws/savingsplan/savings_plans_operations.py b/cloud_governance/common/clouds/aws/savingsplan/savings_plans_operations.py new file mode 100644 index 00000000..28083b79 --- /dev/null +++ 
b/cloud_governance/common/clouds/aws/savingsplan/savings_plans_operations.py @@ -0,0 +1,81 @@ +from datetime import datetime + +import boto3 +from dateutil.relativedelta import relativedelta + + +class SavingsPlansOperations: + """ + This class is for implementing savings plans methods + """ + + SAVINGS_PLANS = 'savingsPlans' + NEXT_TOKEN = 'nextToken' + DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' + DEFAULT_ROUND_DIGITS = 3 + + def __init__(self, savings_plan_client=None): + self.savings_plan_client = savings_plan_client if savings_plan_client else boto3.client(self.SAVINGS_PLANS.lower()) + + def get_savings_plans_months(self, start_date: datetime, end_date: datetime): + """ + This method returns the savings plan month ranges + :param start_date: + :param end_date: + :return: + """ + start_end_dates = [] + month_start_date = start_date + while month_start_date <= end_date: + # Calculate the month end date as the last day of the current month + month_end_date = min( + datetime(month_start_date.year, month_start_date.month, 1) + relativedelta(months=1) - relativedelta( + days=1), end_date) + start_end_dates.append({'start': month_start_date, 'end': month_end_date, 'month': month_start_date.strftime("%b")}) + month_start_date += relativedelta(months=1) + month_start_date = datetime(month_start_date.year, month_start_date.month, 1) + return start_end_dates + + def get_savings_filter_data(self, savings_plans_list: list = None): + """ + This method returns the savings plans filter values + :return: + """ + responses = [] + if not savings_plans_list: + savings_plans_list = self.get_savings_plans_list() + extract_values = ['savingsPlanId', 'state', 'savingsPlanType', + 'paymentOption', 'productTypes', 'commitment', 'upfrontPaymentAmount', + 'recurringPaymentAmount'] + for savings_plans in savings_plans_list: + start_date = datetime.strptime(savings_plans.get('start'), self.DATE_TIME_FORMAT) + end_date = datetime.strptime(savings_plans.get('end'), self.DATE_TIME_FORMAT) + total_payment = savings_plans.get('upfrontPaymentAmount') + daily_payment = float(total_payment) / (24 * 60) + for date_range in self.get_savings_plans_months(start_date=start_date, end_date=end_date): + start, end, month = date_range.get('start'), date_range.get('end'), date_range.get('month') + savings_id = savings_plans.get('savingsPlanId') + days = (end - start).days + 1 + month_payment = round(days * daily_payment, self.DEFAULT_ROUND_DIGITS) + start, end = str(start.date()), str(end.date()) + monthly_savings_data = {'start': start, 'end': end, 'filter_date': f"{start}-{month}", 'index_id': f"{start}-{savings_id}", "CloudName": "AWS", 'SavingsMonthlyPayment': month_payment, 'CostType': 'savings_plans'} + monthly_savings_data.update({value.title(): savings_plans.get(value) for value in extract_values}) + responses.append(monthly_savings_data) + return responses + + def get_savings_plans_list(self, states: list = [], **kwargs): + """ + This method returns the savings plans list + :param states: + :return: + """ + results = {} + kwargs.update({'states': states}) + if not kwargs.get('states'): + kwargs.pop('states') + response = self.savings_plan_client.describe_savings_plans(**kwargs) + results[self.SAVINGS_PLANS] = response.get(self.SAVINGS_PLANS) + while response.get(self.NEXT_TOKEN): + response = self.savings_plan_client.describe_savings_plans(nextToken=response.get(self.NEXT_TOKEN), **kwargs) + results[self.SAVINGS_PLANS].extend(response.get(self.SAVINGS_PLANS)) + return results[self.SAVINGS_PLANS]
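+# Illustrative month splitting (hypothetical dates, not part of the change): +# get_savings_plans_months(datetime(2023, 1, 15), datetime(2023, 3, 10)) should yield three ranges, +# Jan 15-31 ('Jan'), Feb 1-28 ('Feb') and Mar 1-10 ('Mar'), each as {'start': datetime, 'end': datetime, 'month': str}. diff --git a/cloud_governance/common/clouds/aws/utils/utils.py 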
b/cloud_governance/common/clouds/aws/utils/utils.py index ce1d0e01..0c59ec7c 100644 --- a/cloud_governance/common/clouds/aws/utils/utils.py +++ b/cloud_governance/common/clouds/aws/utils/utils.py @@ -1,9 +1,9 @@ from typing import Callable -import boto3 import typeguard from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables class Utils: @@ -13,7 +13,8 @@ class Utils: def __init__(self, region: str = 'us-east-2'): self.region = region - pass + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__update_tag_bulks = self.__environment_variables_dict.get('UPDATE_TAG_BULKS') @typeguard.typechecked def get_details_resource_list(self, func_name: Callable, input_tag: str, check_tag: str, **kwargs): @@ -34,3 +35,49 @@ def get_details_resource_list(self, func_name: Callable, input_tag: str, check_t resources = func_name(Marker=resources[check_tag], **kwargs) resource_list.extend(resources[input_tag]) return resource_list + + @logger_time_stamp + def __tag_resources(self, client_method: Callable, resource_ids: list, tags: list, tags_name: str = 'Tags'): + """ + This method tags the resources + :param client_method: + :param resource_ids: + :param tags: + :param tags_name: + :return: + """ + if tags_name == 'Tags': + client_method(Resources=resource_ids, Tags=tags) + + @logger_time_stamp + def __split_run_bulks(self, iterable: list, limit: int = 1): + """ + This method splits the run into bulks depending on the limit + @return: run bulks + """ + result = [] + length = len(iterable) + for ndx in range(0, length, limit): + result.append(iterable[ndx:min(ndx + limit, length)]) + return result + + @typeguard.typechecked + @logger_time_stamp + def tag_aws_resources(self, client_method: Callable, tags: list, resource_ids: list): + """ + This method tags the aws resources in bulks of UPDATE_TAG_BULKS + :param client_method: + :param tags: + :param resource_ids: + :return: + """ + if tags: + bulk_resource_ids_list = self.__split_run_bulks(iterable=resource_ids, limit=self.__update_tag_bulks) # split the aws resource_ids into batches + co = 0 + cpu_based_resource_ids_list = self.__split_run_bulks(iterable=bulk_resource_ids_list, limit=self.__update_tag_bulks) + for bulk_ids_list in cpu_based_resource_ids_list: + for resource_ids_list in bulk_ids_list: + self.__tag_resources(client_method, resource_ids_list, tags) + co += 1 + return co + diff --git a/cloud_governance/common/clouds/azure/cost_management/cost_management_operations.py b/cloud_governance/common/clouds/azure/cost_management/cost_management_operations.py index 9241a84c..9c7a77cb 100644 --- a/cloud_governance/common/clouds/azure/cost_management/cost_management_operations.py +++ b/cloud_governance/common/clouds/azure/cost_management/cost_management_operations.py @@ -3,6 +3,7 @@ import time import pytz +from azure.core.exceptions import HttpResponseError from azure.mgmt.costmanagement.models import QueryDataset, QueryAggregation, QueryTimePeriod, QueryGrouping from cloud_governance.common.clouds.azure.subscriptions.azure_operations import AzureOperations @@ -33,8 +34,13 @@ def get_usage(self, scope: str, start_date: datetime = '', end_date: datetime = ) }) return response.as_dict() - except Exception as e: - print(e) + except HttpResponseError as e: + logger.error(e) + if e.status_code == 429: + time.sleep(10) + return self.get_usage(scope, start_date=start_date, end_date=end_date, 
granularity=granularity, **kwargs) + except Exception as err: + logger.error(err) return [] @logger_time_stamp @@ -74,6 +80,11 @@ def get_forecast(self, scope: str, start_date: datetime = '', end_date: datetime row_data[data_date] = data result['rows'] = list(row_data.values()) return result + except HttpResponseError as e: + logger.error(e) + if e.status_code == 429: + time.sleep(10) + return self.get_forecast(scope, start_date=start_date, end_date=end_date, granularity=granularity, **kwargs) + except Exception as err: - logger.info(f'Error raised when fetching the forecasting results {err}') - return [] + logger.error(err) + return [] diff --git a/cloud_governance/common/clouds/gcp/__init__.py b/cloud_governance/common/clouds/gcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/common/clouds/gcp/google_account.py b/cloud_governance/common/clouds/gcp/google_account.py new file mode 100644 index 00000000..85b647de --- /dev/null +++ b/cloud_governance/common/clouds/gcp/google_account.py @@ -0,0 +1,56 @@ +from datetime import datetime, timedelta + +from google.cloud import bigquery +from typeguard import typechecked + +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + +import google.auth +from googleapiclient.discovery import build +from googleapiclient.errors import HttpError + + +class GoogleAccount: + """ + This class is for Google account operations + """ + + RETRIES = 3 + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__client = None + if self.__environment_variables_dict.get('GOOGLE_APPLICATION_CREDENTIALS'): + self.__creds, _ = google.auth.default() + self.__client = bigquery.Client() + + @typechecked() + @logger_time_stamp + def get_dates(self, diff_days: int): + """ + This method returns the start and end date + :param diff_days: + :return: + """ + end_date = datetime.now() + start_date = end_date - timedelta(days=diff_days) + return start_date, end_date + + @typechecked() + @logger_time_stamp + def query_list(self, queries: list): + """ + This method returns the results of the queries run against BigQuery + :param queries: + :return: + """ + queries_results = [] + for idx, query in enumerate(queries): + parent_job = self.__client.query(query) + results = parent_job.result() # Waits for job to complete. 
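+ # Hedged usage sketch (placeholder dataset/table; real runs typically build the query from + # GCP_DATABASE_NAME / GCP_DATABASE_TABLE_NAME): + # rows_per_query = GoogleAccount().query_list(["SELECT * FROM my_dataset.my_table LIMIT 5"]) + # each entry of the returned list holds one query's rows as a list of dicts.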
+ query_rows = [] + for row in results: + query_rows.append(dict(row)) + queries_results.append(query_rows) + return queries_results diff --git a/cloud_governance/common/elasticsearch/elastic_upload.py b/cloud_governance/common/elasticsearch/elastic_upload.py index 68274089..32bcaa76 100644 --- a/cloud_governance/common/elasticsearch/elastic_upload.py +++ b/cloud_governance/common/elasticsearch/elastic_upload.py @@ -33,16 +33,16 @@ def es_upload_data(self, items: list, es_index: str = '', **kwargs): count = 0 for item in items: if not item.get('Account'): - item['Account'] = self.account + item['Account'] = kwargs.get('Account') if kwargs.get('Account') else self.account if kwargs.get('set_index'): self.elastic_search_operations.upload_to_elasticsearch(index=es_index, data=item, id=item[kwargs.get('set_index')]) else: self.elastic_search_operations.upload_to_elasticsearch(index=es_index, data=item) count += 1 if count > 0 and len(items) > 0: - logger.info(f'Data Uploaded to {es_index} successfully, Total data: {count}') + logger.warn(f'Data Uploaded to {es_index} successfully, Total data: {count}') except Exception as err: - logger.info(f'Error raised {err}') + logger.error(f'Error raised {err}') def literal_eval(self, data: any): """ diff --git a/cloud_governance/common/elasticsearch/elasticsearch_operations.py b/cloud_governance/common/elasticsearch/elasticsearch_operations.py index 9151dd2f..2289dc4b 100644 --- a/cloud_governance/common/elasticsearch/elasticsearch_operations.py +++ b/cloud_governance/common/elasticsearch/elasticsearch_operations.py @@ -1,7 +1,9 @@ - +import os from datetime import datetime, timedelta import time import pandas as pd +from elasticsearch.helpers import bulk + from cloud_governance.main.environment_variables import environment_variables from elasticsearch_dsl import Search @@ -24,15 +26,20 @@ class ElasticSearchOperations: # max search results MAX_SEARCH_RESULTS = 1000 MIN_SEARCH_RESULTS = 100 + DEFAULT_ES_BULK_LIMIT = 5000 - def __init__(self, es_host: str, es_port: str, region: str = '', bucket: str = '', logs_bucket_key: str = '', + def __init__(self, es_host: str = None, es_port: str = None, region: str = '', bucket: str = '', logs_bucket_key: str = '', timeout: int = 2000): self.__environment_variables_dict = environment_variables.environment_variables_dict - self.__es_host = es_host - self.__es_port = es_port + self.__es_host = es_host if es_host else self.__environment_variables_dict.get('es_host') + self.__es_port = es_port if es_port else self.__environment_variables_dict.get('es_port') self.__region = region self.__timeout = int(self.__environment_variables_dict.get('ES_TIMEOUT')) if self.__environment_variables_dict.get('ES_TIMEOUT') else timeout - self.__es = Elasticsearch([{'host': self.__es_host, 'port': self.__es_port}], timeout=self.__timeout, max_retries=2) + self.__account = self.__environment_variables_dict.get('account') + try: + self.__es = Elasticsearch([{'host': self.__es_host, 'port': self.__es_port}], timeout=self.__timeout, max_retries=2) + except: + self.__es = None def __elasticsearch_get_index_hits(self, index: str, uuid: str = '', workload: str = '', fast_check: bool = False, id: bool = False): @@ -111,11 +118,12 @@ def verify_elasticsearch_data_uploaded(self, index: str, uuid: str = '', workloa raise ElasticSearchDataNotUploaded @typechecked() - def upload_to_elasticsearch(self, index: str, data: dict, doc_type: str = '_doc', es_add_items: dict = None, **kwargs): + def upload_to_elasticsearch(self, index: str, data: dict, 
doc_type: str = '_doc', es_add_items: dict = None, + **kwargs): """ - This method is upload json data into elasticsearch + This method uploads json data into elasticsearch :param index: index name to be stored in elasticsearch - :param data: data must me in dictionary i.e. {'key': 'value'} + :param data: data must be in dictionary i.e. {'key': 'value'} :param doc_type: :param es_add_items: :return: @@ -131,8 +139,11 @@ def upload_to_elasticsearch(self, index: str, data: dict, doc_type: str = '_doc' # utcnow - solve timestamp issue if not data.get('timestamp'): data['timestamp'] = datetime.utcnow() # datetime.now() - + if 'policy' not in data: + data['policy'] = self.__environment_variables_dict.get('policy') # Upload data to elastic search server + if 'account' not in map(str.lower, data.keys()): + data['account'] = self.__account try: if isinstance(data, dict): # JSON Object self.__es.index(index=index, doc_type=doc_type, body=data, **kwargs) @@ -207,21 +218,43 @@ def get_query_data_between_range(self, start_datetime: datetime, end_datetime: d return query @typechecked() - @logger_time_stamp - def fetch_data_between_range(self, es_index: str, start_datetime: datetime, end_datetime: datetime): + def fetch_data_by_es_query(self, es_index: str, query: dict = None, start_datetime: datetime = None, + end_datetime: datetime = None, result_agg: bool = False, group_by: str = '', search_size: int = 100, limit_to_size: bool = False, + filter_path: str = ''): """ - This method fetches the data in between range + This method fetches the data in the given range; if you need aggregation results, pass your own query with aggregation @param es_index: @param start_datetime: @param end_datetime: + @param query: + @param result_agg: + @param group_by: + @param search_size: + @param limit_to_size: limit to size + @param filter_path: @return: """ + es_data = [] if self.__es.indices.exists(index=es_index): - query_body = self.get_query_data_between_range(start_datetime=start_datetime, end_datetime=end_datetime) - data = self.__es.search(index=es_index, body=query_body, doc_type='_doc').get('hits') - if data: - return data['hits'] - return [] + if not query: + if start_datetime and end_datetime: + query = self.get_query_data_between_range(start_datetime=start_datetime, end_datetime=end_datetime) + if query: + response = self.__es.search(index=es_index, body=query, doc_type='_doc', size=search_size, scroll='1h', filter_path=filter_path) + if result_agg: + es_data.extend(response.get('aggregations').get(group_by).get('buckets')) + else: + if response.get('hits').get('hits'): + es_data.extend(response.get('hits').get('hits')) + if not limit_to_size: + scroll_id = response.get('_scroll_id') + while scroll_id: + response = self.__es.scroll(scroll_id=scroll_id, scroll="1h") + if len(response.get('hits').get('hits')) > 0: + es_data.extend(response.get('hits').get('hits')) + else: + break + return es_data @typechecked() @logger_time_stamp @@ -262,3 +295,41 @@ def get_es_data_by_id(self, id: str, index: str): except Exception as err: es_data = {} return es_data + + def upload_data_in_bulk(self, data_items: list, index: str, **kwargs): + """ + This method uploads the data using the bulk api + :param index: + :param data_items: + :return: + """ + total_uploaded = 0 + failed_items = 0 + for i in range(0, len(data_items), self.DEFAULT_ES_BULK_LIMIT): + bulk_items = data_items[i: i + self.DEFAULT_ES_BULK_LIMIT] + for item in bulk_items: + if kwargs.get('id'): + item['_id'] = item.get(kwargs.get('id')) + if not item.get('timestamp'):
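+ # Hedged usage sketch (not part of the change): paired with ElasticSearchQueries.get_all_in_progress_tickets above, e.g. + # hits = self.fetch_data_by_es_query(es_index='cloud-governance-resource-orchestration', query=in_progress_query) + # where every hit is a raw elasticsearch document ({'_id': ..., '_source': {...}}). + item['timestamp'] = 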
     @typechecked()
     @logger_time_stamp
@@ -262,3 +295,41 @@ def get_es_data_by_id(self, id: str, index: str):
         except Exception as err:
             es_data = {}
         return es_data
+
+    def upload_data_in_bulk(self, data_items: list, index: str, **kwargs):
+        """
+        This method uploads the data using the bulk api
+        :param index:
+        :param data_items:
+        :return:
+        """
+        total_uploaded = 0
+        failed_items = 0
+        for i in range(0, len(data_items), self.DEFAULT_ES_BULK_LIMIT):
+            bulk_items = data_items[i: i + self.DEFAULT_ES_BULK_LIMIT]
+            for item in bulk_items:
+                if kwargs.get('id'):
+                    item['_id'] = item.get(kwargs.get('id'))
+                if not item.get('timestamp'):
+                    item['timestamp'] = datetime.strptime(item.get('CurrentDate'), "%Y-%m-%d")
+                item['_index'] = index
+                item['AccountId'] = str(item.get('AccountId'))
+                item['Policy'] = self.__environment_variables_dict.get('policy')
+            response = bulk(self.__es, bulk_items)
+            if response:
+                total_uploaded += len(bulk_items)
+            else:
+                failed_items += len(bulk_items)
+        if total_uploaded > 0:
+            logger.info(f"✅️ {total_uploaded} items were uploaded to the elasticsearch index: {index}")
+        if failed_items > 0:
+            logger.error(f"❌ {failed_items} items were not uploaded to the elasticsearch index: {index}")
+
+    def check_elastic_search_connection(self):
+        """
+        This method returns a boolean value for the elasticsearch connection
+        :return:
+        """
+        if self.__es:
+            return self.__es.ping()
+        return False
diff --git a/cloud_governance/common/google_drive/upload_to_gsheet.py b/cloud_governance/common/google_drive/upload_to_gsheet.py
index 0e309e04..404d9af9 100644
--- a/cloud_governance/common/google_drive/upload_to_gsheet.py
+++ b/cloud_governance/common/google_drive/upload_to_gsheet.py
@@ -38,8 +38,23 @@ def get_cost_center_budget_details(self, account_id: str, dir_path: str = ''):
                                              orient='records')
             if account_row:
                 return account_row[0].get('CostCenter', 0), round(
-                    float(account_row[0].get('Budget', '0').replace(',', '')), 0), str(account_row[0].get('Year'))
-        return 0, 0, ''
+                    float(account_row[0].get('Budget', '0').replace(',', '')), 0), str(account_row[0].get('Year')), account_row[0].get('Owner')
+        return 0, 0, '', 'Others'
+
+    def get_monthly_spa(self, month_name: str, dir_path: str = ''):
+        """This method gets the monthly savings plan amortization"""
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            sheet_name = 'ASP'
+            directory = dir_path if dir_path else tmp_dir
+            file_path = f'{tmp_dir}/{sheet_name}.csv' if not dir_path else f'{dir_path}/{sheet_name}.csv'
+            if not os.path.exists(file_path):
+                self.gsheet_operations.download_spreadsheet(spreadsheet_id=self.__gsheet_id, sheet_name=sheet_name, file_path=directory)
+            accounts_df = pd.read_csv(file_path)
+            records = accounts_df.to_dict(orient='records')
+            for record in records:
+                if record.get('Month').lower() == month_name.lower():
+                    return float(record.get('Total'))
+        return 0
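A sketch of the new monthly savings-plan lookup (assumptions: the class is UploadToGsheet, the sheet id comes from the environment, and the 'ASP' sheet has Month/Total columns as the code above implies):

from cloud_governance.common.google_drive.upload_to_gsheet import UploadToGsheet

upload_to_gsheet = UploadToGsheet()
# Returns the 'Total' value for the row whose 'Month' matches, else 0.
may_amortization = upload_to_gsheet.get_monthly_spa(month_name='May')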
     def format_for_updating_the_cells(self, update_data: list, gsheet_data: pd, sheet_name: str, doc_id: str, doc_id2: str = ''):
         """
diff --git a/cloud_governance/common/jira/jira_exceptions.py b/cloud_governance/common/jira/jira_exceptions.py
new file mode 100644
index 00000000..4bb57042
--- /dev/null
+++ b/cloud_governance/common/jira/jira_exceptions.py
@@ -0,0 +1,10 @@
+
+
+class JiraExceptions(Exception):
+    """
+    This class is for Jira Exceptions
+    """
+
+    def __init__(self, message):
+        self.message = message
+        super().__init__(message)
diff --git a/cloud_governance/common/jira/jira_operations.py b/cloud_governance/common/jira/jira_operations.py
index d081e2a3..a96e247e 100644
--- a/cloud_governance/common/jira/jira_operations.py
+++ b/cloud_governance/common/jira/jira_operations.py
@@ -1,5 +1,13 @@
 import asyncio
+import json
+import os.path
+from datetime import datetime
+
+import typeguard
+
+from cloud_governance.cloud_resource_orchestration.utils.common_operations import string_equal_ignore_case
+from cloud_governance.common.jira.jira_exceptions import JiraExceptions
+from cloud_governance.common.logger.init_logger import logger
 from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
 from cloud_governance.common.jira.jira import Jira
@@ -12,9 +20,11 @@ class JiraOperations:
     """
     REFINEMENT = 'Refinement'
+    IN_PROGRESS = 'In Progress'
     JIRA_TRANSITION_IDS = {
-        'NEW': 51, 'REFINEMENT': 61, 'INPROGRESS': 31, 'CLOSED': 41
+        'NEW': 51, 'REFINEMENT': 61, 'INPROGRESS': 31, 'CLOSED': 41, 'ANY': 0
     }
+    FILE_EXTENSION = '.json'

     def __init__(self):
         self.__environment_variables_dict = environment_variables.environment_variables_dict
@@ -22,63 +32,116 @@ def __init__(self):
         self.__jira_username = self.__environment_variables_dict.get('JIRA_USERNAME').strip()
         self.__jira_token = self.__environment_variables_dict.get('JIRA_TOKEN').strip()
         self.__jira_queue = self.__environment_variables_dict.get('JIRA_QUEUE').strip()
+        self.__cache_temp_dir = self.__environment_variables_dict.get('TEMPORARY_DIRECTORY', '').strip()
         self.__loop = asyncio.new_event_loop()
         self.__jira_object = Jira(url=self.__jira_url, username=self.__jira_username, token=self.__jira_token, ticket_queue=self.__jira_queue, loop=self.__loop)

+    @typeguard.typechecked
     @logger_time_stamp
-    def move_issue_state(self, jira_id: str, state: str):
+    def move_issue_state(self, ticket_id: str, state: str):
         """
-        This method close the issue
+        This method moves the issue state
+        :param ticket_id:
+        :param state:
+        :return:
         """
-        if '-' in jira_id:
-            jira_id = jira_id.split('-')[-1]
+        if '-' in ticket_id:
+            ticket_id = ticket_id.split('-')[-1]
         state_id = self.JIRA_TRANSITION_IDS.get(state.upper())
-        return self.__loop.run_until_complete(self.__jira_object.post_transition(ticket=jira_id, transition=state_id))
+        return self.__loop.run_until_complete(self.__jira_object.post_transition(ticket=ticket_id, transition=state_id))

+    @typeguard.typechecked
     @logger_time_stamp
-    def get_issue(self, jira_id: str):
+    def get_issue(self, ticket_id: str):
         """
         This method returns the issue data
+        :param ticket_id:
+        :return:
+        """
+        return self.__loop.run_until_complete(self.__jira_object.get_ticket(ticket=ticket_id))
+
+    @typeguard.typechecked
+    @logger_time_stamp
+    def return_cache_ticket_description(self, ticket_id: str):
+        """
+        This method checks whether the ticket id was already fetched from the jira api
+        :param ticket_id:
+        :return:
+        """
+        with open(f'{self.__cache_temp_dir}/{ticket_id}{self.FILE_EXTENSION}') as cache_ticket_description:
+            result_data = json.load(cache_ticket_description)
+            result_data['TicketOpenedDate'] = datetime.strptime(result_data.get('TicketOpenedDate'), "%Y-%m-%d %H:%M:%S")
+            return result_data
+
+    @typeguard.typechecked
+    @logger_time_stamp
+    def cache_ticket_description(self, ticket_id: str, ticket_description: dict):
+        """
+        This method saves the ticket_id description
+        :param ticket_description:
+        :param ticket_id:
+        :return:
         """
-        return self.__loop.run_until_complete(self.__jira_object.get_ticket(ticket=jira_id))
+        if self.__cache_temp_dir:
+            with open(f'{self.__cache_temp_dir}/{ticket_id}{self.FILE_EXTENSION}', 'w') as cache_ticket_description:
+                json.dump(ticket_description, cache_ticket_description, default=str)
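A sketch of the new description-cache round-trip (illustrative only; it assumes the JIRA_* and TEMPORARY_DIRECTORY environment variables are configured, and the ticket id is invented):

from datetime import datetime
from cloud_governance.common.jira.jira_operations import JiraOperations

jira_operations = JiraOperations()
# json.dump(default=str) serializes the datetime; the reader parses it back.
jira_operations.cache_ticket_description(ticket_id='123',
                                         ticket_description={'Project': 'demo',
                                                             'TicketOpenedDate': datetime(2023, 5, 1)})
cached = jira_operations.return_cache_ticket_description(ticket_id='123')
print(cached['TicketOpenedDate'])  # parsed back into a datetime object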
+    @typeguard.typechecked
     @logger_time_stamp
-    def get_issue_description(self, jira_id: str, state: str = '', sub_task: bool = False):
-        if '-' in jira_id:
-            jira_id = jira_id.split('-')[-1]
-        issue_data = self.get_issue(jira_id=jira_id)
-        if issue_data:
-            if issue_data['fields']['status']['name'] == self.REFINEMENT or state.upper() == 'ANY' or (state == self.JIRA_TRANSITION_IDS.get('INPROGRESS') and issue_data['fields']['status']['name'] == 'In Progress') or sub_task:
-                description_list = issue_data['fields']['description'].split('\n')
-                description_dict = {}
-                for description in description_list:
-                    if description:
-                        values = description.strip().split(':', 1)
-                        if len(values) == 2:
-                            key, value = values
-                            description_dict[key.strip().replace(' ', '')] = value.strip()
-                if 'Project' in description_dict:
-                    if description_dict['Project'] == "Other":
-                        description_dict['Project'] = description_dict.get('Explanationof"Other"primaryproduct')
-                        if not description_dict['Project']:
-                            description_dict['Project'] = description_dict.get('Otherproductsbeingtested')
-                        if not description_dict['Project']:
-                            description_dict['Project'] = description_dict.get('Explanationof"Other"secondaryproduct')
-                return description_dict
+    def get_issue_description(self, ticket_id: str, state: str = '', sub_task: bool = False):
+        """
+        This method returns the ticket data description
+        :param ticket_id:
+        :param state:
+        :param sub_task:
+        :return:
+        """
+        if '-' in ticket_id:
+            ticket_id = ticket_id.split('-')[-1]
+        if os.path.exists(f'{self.__cache_temp_dir}/{ticket_id}{self.FILE_EXTENSION}'):
+            return self.return_cache_ticket_description(ticket_id=ticket_id)
+        else:
+            issue_data = self.get_issue(ticket_id=ticket_id)
+            if issue_data:
+                if issue_data['fields']['status']['name'] == self.REFINEMENT or self.JIRA_TRANSITION_IDS.get(state, -1) == 0 or (state == 'INPROGRESS' and issue_data['fields']['status']['name'] == self.IN_PROGRESS) or sub_task:
+                    description_list = issue_data['fields']['description'].split('\n')
+                    description_dict = {}
+                    for description in description_list:
+                        if description:
+                            values = description.strip().split(':', 1)
+                            if len(values) == 2:
+                                key, value = values
+                                description_dict[key.strip().replace(' ', '')] = value.strip()
+                    if 'Project' in description_dict:
+                        if description_dict['Project'] == "Other":
+                            description_dict['Project'] = description_dict.get('Explanationof"Other"primaryproduct')
+                            if not description_dict['Project']:
+                                description_dict['Project'] = description_dict.get('Otherproductsbeingtested')
+                            if not description_dict['Project']:
+                                description_dict['Project'] = description_dict.get('Explanationof"Other"secondaryproduct')
+                    description_dict['TicketOpenedDate'] = datetime.strptime(issue_data.get('fields').get('created').split('.')[0], "%Y-%m-%dT%H:%M:%S")
+                    description_dict['JiraStatus'] = issue_data['fields']['status']['name']
+                    self.cache_ticket_description(ticket_id=ticket_id, ticket_description=description_dict)
+                    return description_dict
         return {}

     @logger_time_stamp
     def get_all_issues_in_progress(self):
-        """This method get all issues which are in progress"""
+        """
+        This method gets all issues which are in progress
+        :return:
+        """
         issues = self.__loop.run_until_complete(self.__jira_object.search_tickets(query={'Status': "'IN PROGRESS'"})).get('issues')
-        jira_ids = {}
+        ticket_ids = {}
         for issue in issues:
             if '[Clouds]' in issue['fields']['summary']:
-                jira_id = issue.get('key')
+                ticket_id = issue.get('key')
                 description = self.beautify_issue_description(issue['fields']['description'])
-                jira_ids[jira_id] = description.get('Region')
-        return jira_ids
+                description['TicketOpenedDate'] = datetime.strptime(issue.get('fields').get('created').split('.')[0], "%Y-%m-%dT%H:%M:%S")
+                ticket_ids[ticket_id] = description.get('Region')
+        return ticket_ids

+    @logger_time_stamp
     def beautify_issue_description(self, description):
         """
         This method beautify the issue description
@@ -94,28 +157,170 @@ def beautify_issue_description(self, description):
             description_data[index] = line.strip()
         return description_data
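For context, a sketch of calling the reworked description parser (the ticket id is invented). Passing state='ANY' maps to transition id 0, which bypasses the status check; results are served from the file cache when present:

description = jira_operations.get_issue_description(ticket_id='TICKET-456', state='ANY')
print(description.get('Project'), description.get('JiraStatus'), description.get('TicketOpenedDate'))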
+    @typeguard.typechecked
     @logger_time_stamp
-    def get_jira_id_sub_tasks(self, jira_id: str, closed: bool = False):
-        """This method returns th list of subtasks"""
-        jira_id = jira_id.split("-")[-1]
-        jira_data = self.__loop.run_until_complete(self.__jira_object.get_ticket(ticket=jira_id))
+    def get_ticket_id_sub_tasks(self, ticket_id: str, closed: bool = False):
+        """
+        This method returns the list of subtasks
+        :param ticket_id:
+        :param closed:
+        :return:
+        """
+        ticket_id = ticket_id.split("-")[-1]
+        jira_data = self.get_issue(ticket_id=ticket_id)
         if jira_data:
             sub_tasks_ids = []
-            sub_tasks = jira_data.get('fields').get('subtasks')
-            for sub_task in sub_tasks:
-                fields = sub_task.get('fields')
-                if fields.get('status').get('name') != 'Closed' or closed:
-                    sub_tasks_ids.append(sub_task.get('key'))
+            sub_tasks = jira_data.get('fields', {}).get('subtasks', {})
+            if sub_tasks:
+                for sub_task in sub_tasks:
+                    fields = sub_task.get('fields')
+                    if fields.get('status').get('name') != 'Closed' or closed:
+                        sub_tasks_ids.append(sub_task.get('key'))
             return sub_tasks_ids
         return []

-    def get_issue_sub_tasks_cost_estimation(self, jira_id: str):
-        """This method get issue cost estimation"""
-        sub_tasks = self.get_jira_id_sub_tasks(jira_id=jira_id, closed=True)
+    @typeguard.typechecked
+    @logger_time_stamp
+    def get_issue_sub_tasks_cost_estimation(self, ticket_id: str):
+        """
+        This method gets the issue cost estimation
+        :param ticket_id:
+        :return:
+        """
+        sub_tasks = self.get_ticket_id_sub_tasks(ticket_id=ticket_id, closed=True)
         cost_estimation = 0
         for sub_task in sub_tasks:
-            description = self.get_issue_description(jira_id=sub_task, sub_task=True)
+            description = self.get_issue_description(ticket_id=sub_task, sub_task=True)
             cost_estimation += float(description.get('CostEstimation', 0))
         return cost_estimation

+    @typeguard.typechecked
+    def get_issue_sub_tasks_duration(self, ticket_id: str):
+        """
+        This method returns the issue sub-tasks total duration
+        :param ticket_id:
+        :return:
+        """
+        sub_tasks = self.get_ticket_id_sub_tasks(ticket_id=ticket_id, closed=True)
+        total_duration = 0
+        for sub_task in sub_tasks:
+            description = self.get_issue_description(ticket_id=sub_task, sub_task=True)
+            total_duration += int(description.get('Days', 0))
+        return total_duration
+
+    @typeguard.typechecked
+    @logger_time_stamp
+    def get_all_issues(self, ticket_status: str):
+        """
+        This method gets all issues with the given ticket status
+        :param ticket_status:
+        :return:
+        """
+        issues = self.__loop.run_until_complete(
+            self.__jira_object.search_tickets(query={'Status': f"'{ticket_status}'"})).get('issues')
+        ticket_ids = {}
+        for issue in issues:
+            if '[Clouds]' in issue['fields']['summary']:
+                ticket_id = issue.get('key')
+                description = self.beautify_issue_description(issue['fields']['description'])
+                description['TicketOpenedDate'] = datetime.strptime(issue.get('fields').get('created').split('.')[0], "%Y-%m-%dT%H:%M:%S")
+                ticket_ids[ticket_id] = description
+        return ticket_ids
+
+    def __check_ticket_state(self, ticket_state: str):
+        """
+        This method checks whether ticket_state is present in the JIRA_TRANSITION_IDS
+        :param ticket_state:
+        :return:
+        """
+        if ticket_state.upper() not in self.JIRA_TRANSITION_IDS.keys():
+            raise JiraExceptions(f'Undefined value {ticket_state}, accepted values: {self.JIRA_TRANSITION_IDS.keys()}')
+        return True
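A sketch of aggregating a ticket's closed sub-tasks with the renamed helpers (ticket id invented; both methods parse each sub-task description via get_issue_description):

total_cost = jira_operations.get_issue_sub_tasks_cost_estimation(ticket_id='TICKET-456')
total_days = jira_operations.get_issue_sub_tasks_duration(ticket_id='TICKET-456')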
+    def __get_ids_from_sub_task_data(self, sub_tasks_data: list, ticket_state: str, check_summary: str):
+        """
+        This method returns the subtask id list filtered by a condition on the summary
+        :param ticket_state:
+        :param check_summary:
+        :param sub_tasks_data:
+        :return:
+        """
+        sub_tasks_ids = []
+        if sub_tasks_data:
+            for sub_task in sub_tasks_data:
+                summary = sub_task.get('fields', {}).get('summary')
+                if summary and check_summary in summary:
+                    fields = sub_task.get('fields', {})
+                    status = fields.get('status', {}).get('name').replace(' ', '')
+                    if string_equal_ignore_case(ticket_state, status):
+                        sub_tasks_ids.append(sub_task.get('key').split('-')[-1])
+        return sub_tasks_ids
+
+    def get_all_subtasks_ticket_ids(self, ticket_id: str, ticket_state: str, check_summary: str = ''):
+        """
+        This method returns all the sub-task ids whose summary matches check_summary
+        :param ticket_id:
+        :param ticket_state:
+        :param check_summary:
+        :return:
+        """
+        if self.__check_ticket_state(ticket_state=ticket_state):
+            ticket_id = ticket_id.split("-")[-1]
+            jira_data = self.get_issue(ticket_id=ticket_id)
+            sub_tasks_ids = []
+            if jira_data:
+                sub_tasks = jira_data.get('fields', {}).get('subtasks', {})
+                if sub_tasks:
+                    sub_tasks_ids = self.__get_ids_from_sub_task_data(sub_tasks_data=sub_tasks,
+                                                                      ticket_state=ticket_state,
+                                                                      check_summary=check_summary)
+                else:
+                    logger.warning(f'No sub-tasks found for the TicketId: {ticket_id}')
+            return sub_tasks_ids
+
+    def get_budget_extend_tickets(self, ticket_id: str, ticket_state: str):
+        """
+        This method returns the budget extension tickets of ticket_id
+        :return:
+        """
+        check_summary = 'Budget Extension'
+        return self.get_all_subtasks_ticket_ids(ticket_id=ticket_id, ticket_state=ticket_state,
+                                                check_summary=check_summary)
+
+    def get_duration_extend_tickets(self, ticket_id: str, ticket_state: str):
+        """
+        This method returns the duration extension tickets of ticket_id
+        :return:
+        """
+        check_summary = 'Duration Extension'
+        return self.get_all_subtasks_ticket_ids(ticket_id=ticket_id, ticket_state=ticket_state,
+                                                check_summary=check_summary)
+
+    def get_total_extend_budget(self, sub_ticket_ids: list):
+        """
+        This method returns the total budget for extension
+        :param sub_ticket_ids:
+        :return:
+        """
+        total_budget_to_extend = 0
+        for sub_ticket_id in sub_ticket_ids:
+            description = self.get_issue_description(ticket_id=sub_ticket_id, sub_task=True)
+            extended_budget = int(description.get('Budget', 0))
+            if extended_budget == 0:
+                extended_budget = int(description.get('CostEstimation', 0))
+            total_budget_to_extend += extended_budget
+        return total_budget_to_extend
+
+    def get_total_extend_duration(self, sub_ticket_ids: list):
+        """
+        This method returns the total duration for extension
+        :param sub_ticket_ids:
+        :return:
+        """
+        total_duration = 0
+        for sub_ticket_id in sub_ticket_ids:
+            description = self.get_issue_description(ticket_id=sub_ticket_id, sub_task=True)
+            total_duration += int(description.get('Days', 0))
+        return total_duration
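A sketch of chaining the new extension helpers (ticket id invented). Note that ticket_state must be one of the JIRA_TRANSITION_IDS keys, case-insensitively, or JiraExceptions is raised:

closed_budget_subtasks = jira_operations.get_budget_extend_tickets(ticket_id='TICKET-456',
                                                                   ticket_state='closed')
extra_budget = jira_operations.get_total_extend_budget(sub_ticket_ids=closed_budget_subtasks)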
diff --git a/cloud_governance/common/ldap/ldap_search.py b/cloud_governance/common/ldap/ldap_search.py
index 55fcd785..6d4dd8d4 100644
--- a/cloud_governance/common/ldap/ldap_search.py
+++ b/cloud_governance/common/ldap/ldap_search.py
@@ -18,7 +18,7 @@ def __get_manager_name(self, manager_data: str):
         """
         try:
             manager_id = manager_data.replace('=', ':').split(',')[0].split(':')[-1]
-            manager_details = self.get_details(user_name=manager_id)
+            manager_details = self.__get_details(user_name=manager_id)
             return str(manager_details.get('cn')[0], 'UTF-8'), manager_id
         except Exception as err:
             return []
@@ -34,11 +34,13 @@ def __organise_user_details(self, data: dict):
                 manager_name, manager_id = self.__get_manager_name(manager_data=str(data['manager'][0], 'UTF-8'))
                 user_data['managerName'] = manager_name
                 user_data['managerId'] = manager_id
+                user_data['ManagerName'] = manager_name
+                user_data['ManagerId'] = manager_id
             return user_data
         except Exception as err:
             return []

-    def get_details(self, user_name: str):
+    def __get_details(self, user_name: str):
         """
         This method get the user data from ldap
         @param user_name:
@@ -64,5 +66,5 @@ def get_user_details(self, user_name):
         @param user_name:
         @return:
         """
-        user_data = self.get_details(user_name=user_name)
+        user_data = self.__get_details(user_name=user_name)
         return self.__organise_user_details(data=user_data)
diff --git a/cloud_governance/common/logger/init_logger.py b/cloud_governance/common/logger/init_logger.py
index 2cdc5831..ae4af69c 100644
--- a/cloud_governance/common/logger/init_logger.py
+++ b/cloud_governance/common/logger/init_logger.py
@@ -1,5 +1,4 @@
 import sys
-import os

 import logging
diff --git a/cloud_governance/common/logger/logger_time_stamp.py b/cloud_governance/common/logger/logger_time_stamp.py
index a9fe2da1..259b5983 100644
--- a/cloud_governance/common/logger/logger_time_stamp.py
+++ b/cloud_governance/common/logger/logger_time_stamp.py
@@ -24,13 +24,14 @@ def method_wrapper(*args, **kwargs):
         time_start = time.time()
         date_time_start = datetime.datetime.now().strftime(datetime_format)
         try:
-            logger.info(f'Method name: {method.__name__} {kwargs} , Start time: {date_time_start} ')
+            logger.warning(f'Method name: {method.__name__} , Start time: {date_time_start} ')
+            logger.info(f'Method name: {method.__name__} {kwargs}')
             result = method(*args, **kwargs)
             time_end = time.time()
             date_time_end = datetime.datetime.now().strftime(datetime_format)
             total_time = time_end - time_start
             total_time_str = f'Total time: {round(total_time, 2)} sec'
-            logger.info(f'Method name: {method.__name__} , End time: {date_time_end} , {total_time_str}')
+            logger.warning(f'Method name: {method.__name__} , End time: {date_time_end} , {total_time_str}')
         except Exception as err:
             time_end = time.time()
             total_time = time_end - time_start
diff --git a/cloud_governance/common/mails/mail_message.py b/cloud_governance/common/mails/mail_message.py
index 097e7bf7..ef3d80b7 100644
--- a/cloud_governance/common/mails/mail_message.py
+++ b/cloud_governance/common/mails/mail_message.py
@@ -1,15 +1,42 @@
+import os.path
+
+from jinja2 import Environment, FileSystemLoader
+
+from cloud_governance.common.ldap.ldap_search import LdapSearch
 from cloud_governance.main.environment_variables import environment_variables


 class MailMessage:
-    RESTRICTION = 'Do not reply this email. If you need more clarification, please reach out to us in the CoreOS slack channel - #perf-dept-public-clouds.'
+
+    RESTRICTION = 'Do not reply to this email. If you need more information, please reach out to us in the slack channel - #perf-dept-public-clouds.'
+    FOOTER = '
---
Thanks,
Cloud Governance Team
'

     def __init__(self):
         self.__environment_variables_dict = environment_variables.environment_variables_dict
-        self.account = self.__environment_variables_dict.get('account', '')
+        self.account = self.__environment_variables_dict.get('account', '').upper()
         self.policy = self.__environment_variables_dict.get('policy', '')
         self.region = self.__environment_variables_dict.get('AWS_DEFAULT_REGION', '')
+        self.__LDAP_HOST_NAME = self.__environment_variables_dict.get('LDAP_HOST_NAME')
+        self.__ldap_search = LdapSearch(ldap_host_name=self.__LDAP_HOST_NAME)
+        self.__public_cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME')
+        self.__portal = self.__environment_variables_dict.get('CRO_PORTAL', '')
+        self.__cro_duration_days = self.__environment_variables_dict.get('CRO_DURATION_DAYS')
+        self.__templates_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
+        self.env_loader = Environment(loader=FileSystemLoader(self.__templates_path))

+    def get_user_ldap_details(self, user_name: str):
+        """
+        This method returns user details from ldap
+        :param user_name:
+        :return:
+        """
+        user_details = self.__ldap_search.get_user_details(user_name=user_name)
+        if user_details:
+            return user_details.get('displayName')
+        else:
+            return None
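A sketch of the new MailMessage helpers (illustrative only; the kerberos id and cost values are invented, and LDAP_HOST_NAME is assumed to be configured):

from cloud_governance.common.mails.mail_message import MailMessage

mail_message = MailMessage()
display_name = mail_message.get_user_ldap_details(user_name='jdoe')  # hypothetical id
# cro_cost_over_usage composes the over-usage alert from keyword arguments;
# the wording changes when Cost already exceeds OverUsageCost.
subject, body = mail_message.cro_cost_over_usage(CloudName='aws', OverUsageCost=500,
                                                 FullName=display_name, Cost=612.345)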
     def ec2_stop(self, name: str, days: int, image_id: str, delete_instance_days: int, instance_name: str,
                  resource_id: str, stopped_time: str, ec2_type: str, **kwargs):
@@ -227,52 +254,250 @@ def ibm_cost_over_usage(self, data: str, month: int, year: int):
         """.strip()
         return subject, body

-    def get_long_run_alert(self, days: int, user: str, jira_id: str):
+    def cro_monitor_alert_message(self, days: int, user: str, ticket_id: str):
         """
-        This method return the LongRun, second Alert Message
+        This method returns the CRO Alert Message
+        :param days:
+        :param user:
+        :param ticket_id:
+        :return:
         """
-        subject = f'Cloud LongRun Alert: Expiring in {days} days'
+        ticket_id = ticket_id.split('-')[-1]
+        subject = f'[Action required] Cloud Resources Budget request Ticket Expiring in {days} days'
+        user_display_name = self.get_user_ldap_details(user_name=user)
         body = f"""
-Hi {user},
+Hi {user_display_name},
-
-This is a message to alert you that in {days} days, the cloud request is expiring.
-
-Please take an action. If you are not using the instances Terminate the instances.
-
-Refer to the Jira issue: {jira_id}
-
-Visit the wiki page to get more information
-
-
+{self.FOOTER}
        """.strip()
         return subject, body

-    def get_long_run_expire_alert(self, user: str, jira_id: str):
+    def cro_cost_over_usage(self, **kwargs):
         """
-        This method return the LongRun, Expire Alert Message
+        This method returns the subject, body for cost over usage
+        :param kwargs:
+        :return:
         """
-        subject = f'LongRun Alert: Expired'
+        cloud_name = kwargs.get('CloudName', 'NA').upper()
+        over_usage_cost = kwargs.get('OverUsageCost', 'NA')
+        full_name = kwargs.get('FullName', '')
+        if not full_name:
+            full_name = kwargs.get('to')
+        user_cost = round(kwargs.get('Cost', 0), 3)
+        subject = f'[Action required]: Cloud Resources Open Budget Request'
+        if user_cost > over_usage_cost:
+            message = f"it's over $ {over_usage_cost}."
+        else:
+            message = f"it may go over $ {over_usage_cost} in the next few days."
         body = f"""
-Hi {user},
-
-
-
-This is a message to alert you that the cloud long run request is expired.
-
-Please take an action. If you are not using the instances Terminate the instances.
-
-Refer to the Jira issue: {jira_id}
-
-Visit the wiki page to get more information
-
-""".strip()
+Hi {full_name},
+
+Your {cloud_name} cost usage in the last {self.__cro_duration_days} days is $ {user_cost} and {message}
+You must open the project ticket in the following Link.
+After submitting a ticket, you must add the Tag (TicketId:#) to every active resource that is related to the project ticket.
+
+If you have any questions, please let us know in the slack channel #perf-dept-public-clouds
+{self.FOOTER}
+"""
+        return subject, body
+
+    def cro_request_for_manager_approval(self, manager: str, request_user: str, cloud_name: str, ticket_id: str, description: dict, **kwargs):
+        """
+        This method returns the approval-request message sent to the manager
+        :param description:
+        :param ticket_id:
+        :param manager:
+        :param request_user:
+        :param cloud_name:
+        :return:
+        """
+        subject = '[Action required]: Cloud Resources Budget Request Approval'
+        manager_full_name = self.get_user_ldap_details(user_name=manager)
+        user_full_name = self.get_user_ldap_details(user_name=request_user)
+        ticket_id = ticket_id.split('-')[-1]
+        context = {'manager': manager, 'manager_full_name': manager_full_name, 'user_full_name': user_full_name,
+                   'ticket_id': ticket_id, 'portal': self.__portal, 'request_user': request_user, 'description': description,
+                   'footer': self.FOOTER}
+        template_loader = self.env_loader.get_template('cro_request_for_manager_approval.j2')
+        context['extra_message'] = kwargs.get('extra_message', '')
+        body = template_loader.render(context)
+        return subject, body
+
+    def cro_send_user_alert_to_add_tags(self, user: str, ticket_ids: list):
+        """
+        This method returns the subject, body for adding tags
+        :param user:
+        :param ticket_ids:
+        :return:
+        """
+        subject = '[Action required]: Add TicketId tag'
+        ticket_ids = "\n".join([f"  • {val}" for idx, val in enumerate(ticket_ids)])
+        user_display_name = self.get_user_ldap_details(user_name=user)
+        body = f"""
+Hi {user_display_name},
+
+You have the following approved JIRA Ticket-Ids:
+{ticket_ids}
+
+Currently, there are several instances running over budget; kindly review and tag those instances with TicketId: #
+Please find the attached document below.
+
+{self.FOOTER}
+        """
+        return subject, body
+
+    def cro_send_closed_alert(self, user: str, ticket_id: str):
+        """
+        This method sends the cro ticket close alert
+        :param user:
+        :param ticket_id:
+        :return:
+        """
+        subject = 'Closing Cloud Budget Request ticket'
+        ticket_id = ticket_id.split('-')[-1]
+        user_full_name = self.get_user_ldap_details(user_name=user)
+        body = f"""
+Hi {user_full_name},
+
+Your cloud budget request ( TicketId: {ticket_id} ) duration has expired and the ticket was automatically closed.
+You can find the summary in the Portal.
    + {self.FOOTER} + """ + return subject, body + + def filter_resources_on_days(self, resources: dict): + """ + This method return the resources based on the days + :param resources: + :param days: + :return: + """ + resources_by_days = {} + for policy_name, resource_data in resources.items(): + for region_name, policy_region_data in resource_data.items(): + for data_item in policy_region_data: + resources_by_days.setdefault(data_item.get('Days'), []).append(data_item) + return resources_by_days + + def get_data_in_html_table_format(self, resources: dict): + """ + This method returns user policy alerts in HTML table format + :param resources: + :return: + """ + style = """ + + """ + html_table_format = f"""{style}""" + thead_values = ['Policy', 'Region', 'ResourceId', 'Name', 'Action', 'DeletedDay'] + th_elements = ''.join([f'' for value in thead_values]) + html_table_format += f'{th_elements}' + for days, resource_data in resources.items(): + resource_data = sorted(resource_data, key=lambda item: (item.get('Policy'), item.get('Region'))) + for resource in resource_data: + html_table_format += '' + for th_value in thead_values: + if 'Deleted' == resource.get(th_value): + html_table_format += f"" + else: + html_table_format += f"""""" + html_table_format += '' + html_table_format += '
    {value}
    {resource.get(th_value)} 🗑{resource.get(th_value)}
    ' + return html_table_format + + def get_agg_policies_mail_message(self, user: str, user_resources: dict): + """ + This method returns the message for the aggregated alert of all policies + :param user: + :param user_resources: + :return: + """ + display_name = self.get_user_ldap_details(user_name=user) + resources_by_days = self.filter_resources_on_days(resources=user_resources) + table_data = self.get_data_in_html_table_format(resources=resources_by_days) + display_name = display_name if display_name else user + subject = f'Cloud Governance: Policy Alerts' + body = f""" +
Hi {display_name},
+
+You can find below your unused resources in the {self.__public_cloud_name} account ({self.account}).
+
+If you want to keep them, please add a "Policy=Not_Delete" or "Policy=skip" tag to each resource.
+{table_data}
+
+{self.RESTRICTION}
    + {self.FOOTER} +""" + return subject, body + + def cro_monitor_budget_remain_alert(self, ticket_id: str, budget: int, user: str, used_budget: int, remain_budget: int): + """ + This method returns subject, body for the budget remain alert + :param ticket_id: + :param budget: + :param user: + :param used_budget: + :param remain_budget: + :return: + """ + ticket_id = ticket_id.split('-')[-1] + subject = f'[Action required] Cloud Resources Budget Remain' + user_display_name = self.get_user_ldap_details(user_name=user) + template_loader = self.env_loader.get_template('cro_monitor_budget_remain_alert.j2') + context = {'name': user_display_name, 'ticket_id': ticket_id, 'portal': self.__portal, + 'budget': budget, 'used_budget': used_budget, 'remain_budget': remain_budget, 'footer': self.FOOTER} + body = template_loader.render(context) + return subject, body + + def cro_monitor_budget_remain_high_alert(self, ticket_id: str, budget: int, user: str, used_budget: int, remain_budget: int): + """ + This method returns subject, body for the budget completed high alert + :param ticket_id: + :param budget: + :param user: + :param used_budget: + :param remain_budget: + :return: + """ + ticket_id = ticket_id.split('-')[-1] + subject = f'[Action required] Cloud Resources Budget Remain' + user_display_name = self.get_user_ldap_details(user_name=user) + template_loader = self.env_loader.get_template('cro_monitor_budget_remain_high_alert.j2') + context = {'name': user_display_name, 'ticket_id': ticket_id, 'portal': self.__portal, + 'budget': budget, 'used_budget': used_budget, 'remain_budget': remain_budget, + 'footer': self.FOOTER} + body = template_loader.render(context) return subject, body diff --git a/cloud_governance/common/mails/postfix.py b/cloud_governance/common/mails/postfix.py index 266b9d3d..4cc9191c 100644 --- a/cloud_governance/common/mails/postfix.py +++ b/cloud_governance/common/mails/postfix.py @@ -34,6 +34,10 @@ def __init__(self): self.__es_port = self.__environment_variables_dict.get('es_port', '') self.__account = self.__environment_variables_dict.get('account', '') self.__policy_output = self.__environment_variables_dict.get('policy_output', '') + self.__default_admins = self.__environment_variables_dict.get('DEFAULT_ADMINS') + self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT') + self.__mail_to = self.__environment_variables_dict.get('EMAIL_TO') # testing purposes + self.__mail_cc = self.__environment_variables_dict.get('EMAIL_CC') self.bucket_name, self.key = self.get_bucket_name() self.__es_index = 'cloud-governance-mail-messages' if self.__es_host: @@ -53,62 +57,72 @@ def get_bucket_name(self): @logger_time_stamp def send_email_postfix(self, subject: str, to: any, cc: list, content: str, **kwargs): - msg = MIMEMultipart('alternative') - msg["Subject"] = subject - msg["From"] = "%s <%s>" % ( - 'cloud-governance', - "@".join(["noreply-cloud-governance", 'redhat.com']), - ) - if isinstance(to, str): - msg["To"] = "@".join([to, 'redhat.com']) - elif isinstance(to, list): - msg["To"] = ", ".join(to) - msg["Cc"] = ",".join(cc) - # msg.add_header("Reply-To", self.reply_to) - # msg.add_header("User-Agent", self.reply_to) - if kwargs.get('filename'): - attachment = MIMEText(open(kwargs['filename']).read()) - attachment.add_header('Content-Disposition', 'attachment', - filename=kwargs['filename'].split('/')[-1]) - msg.attach(attachment) - if kwargs.get('mime_type'): - msg.attach(MIMEText(content, kwargs.get('mime_type'))) - else: - msg.attach(MIMEText(content)) - 
email_string = msg.as_string() - email_host = 'localhost' - try: - with smtplib.SMTP(email_host) as s: - try: - logger.debug(email_string) - s.send_message(msg) - if isinstance(to, str): - logger.info(f'Mail sent successfully to {to}@redhat.com') - elif isinstance(to, list): - logger.info(f'Mail sent successfully to {", ".join(to)}@redhat.com') - if kwargs.get('filename'): - file_name = kwargs['filename'].split('/')[-1] - date_key = datetime.datetime.now().strftime("%Y%m%d%H") - if self.__policy_output: - self.__s3_operations.upload_file(file_name_path=kwargs['filename'], - bucket=self.bucket_name, key=f'{self.key}/{self.__policy}/{date_key}', - upload_file=file_name) - s3_path = f'{self.__policy_output}/logs/{self.__policy}/{date_key}/{file_name}' - content += f'\n\nresource_file_path: s3://{s3_path}\n\n' - data = {'Policy': self.__policy, 'To': to, 'Cc': cc, 'Message': content, 'Account': self.__account.upper(), 'MessageType': kwargs.get('message_type')} - if kwargs.get('resource_id'): - data['resource_id'] = kwargs['resource_id'] - if kwargs.get('extra_purse'): - data['extra_purse'] = round(kwargs['extra_purse'], 3) - if self.__es_host: - self.__es_operations.upload_to_elasticsearch(data=data, index=self.__es_index) - logger.info(f'Uploaded to es index: {self.__es_index}') - else: - logger.info('Error missing the es_host') - except smtplib.SMTPException as ex: - logger.info(f'Error while sending mail, {ex}') - return False - return True - except Exception as err: - logger.info(f'Some error occurred, {err}') - return False + if self.__email_alert: + if self.__mail_to: + to = self.__mail_to + if self.__mail_cc: + cc = self.__mail_cc + cc = [cc_user for cc_user in cc if to and to not in cc_user] + cc = [cc_user if '@redhat.com' in cc_user else f'{cc_user}@redhat.com' for cc_user in cc] + msg = MIMEMultipart('alternative') + msg["Subject"] = subject + msg["From"] = "%s <%s>" % ( + 'cloud-governance', + "@".join(["noreply-cloud-governance", 'redhat.com']), + ) + if isinstance(to, str): + msg["To"] = "@".join([to, 'redhat.com']) + elif isinstance(to, list): + msg["To"] = ", ".join(to) + msg["Cc"] = ",".join(cc) + # msg.add_header("Reply-To", self.reply_to) + # msg.add_header("User-Agent", self.reply_to) + if kwargs.get('filename'): + attachment = MIMEText(open(kwargs['filename']).read()) + attachment.add_header('Content-Disposition', 'attachment', + filename=kwargs['filename'].split('/')[-1]) + msg.attach(attachment) + if kwargs.get('mime_type'): + msg.attach(MIMEText(content, kwargs.get('mime_type'))) + else: + msg.attach(MIMEText(content)) + email_string = msg.as_string() + email_host = 'localhost' + try: + with smtplib.SMTP(email_host) as s: + try: + logger.debug(email_string) + s.send_message(msg) + if isinstance(to, str): + logger.warn(f'Mail sent successfully to {to}@redhat.com') + elif isinstance(to, list): + logger.warn(f'Mail sent successfully to {", ".join(to)}@redhat.com') + if kwargs.get('filename'): + file_name = kwargs['filename'].split('/')[-1] + date_key = datetime.datetime.now().strftime("%Y%m%d%H") + if self.__policy_output: + self.__s3_operations.upload_file(file_name_path=kwargs['filename'], + bucket=self.bucket_name, key=f'{self.key}/{self.__policy}/{date_key}', + upload_file=file_name) + s3_path = f'{self.__policy_output}/logs/{self.__policy}/{date_key}/{file_name}' + content += f'\n\nresource_file_path: s3://{s3_path}\n\n' + es_data = kwargs.get('es_data') + data = {'Policy': self.__policy, 'To': to, 'Cc': cc, 'Message': content, 'Account': self.__account.upper(), 
'MessageType': kwargs.get('message_type', 'alert')}
+                        if es_data:
+                            data.update(es_data)
+                        if kwargs.get('resource_id'):
+                            data['resource_id'] = kwargs['resource_id']
+                        if kwargs.get('extra_purse'):
+                            data['extra_purse'] = round(kwargs['extra_purse'], 3)
+                        if self.__es_host:
+                            self.__es_operations.upload_to_elasticsearch(data=data, index=self.__es_index)
+                            logger.warning(f'Uploaded to es index: {self.__es_index}')
+                        else:
+                            logger.warning('Error: missing the es_host')
+                    except smtplib.SMTPException as ex:
+                        logger.error(f'Error while sending mail, {ex}')
+                        return False
+                return True
+            except Exception as err:
+                logger.error(f'Some error occurred, {err}')
+                return False
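A sketch of the reworked mail gating (illustrative only; the class name Postfix and the argument values are assumptions). With EMAIL_ALERT false nothing is sent; when EMAIL_TO/EMAIL_CC are set, traffic is redirected to those addresses for testing:

from cloud_governance.common.mails.postfix import Postfix

postfix = Postfix()
# to/cc are normalized to @redhat.com addresses inside send_email_postfix.
postfix.send_email_postfix(subject='cloud-governance alert', to='some-user', cc=[],
                           content='Hello', mime_type='html')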
diff --git a/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2 b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2
new file mode 100644
index 00000000..6967b1a5
--- /dev/null
+++ b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2
@@ -0,0 +1,12 @@
+Hi {{name}},
+
+You have used {{used_budget}} / {{budget}} of the budget as of today for TicketId: {{ticket_id}}.
+The remaining budget is {{remain_budget}}.
+
+Open a budget extension request if more budget is needed.
+This ticket will be closed if the budget exceeds 110%.
+You can extend the budget here.
+Visit the wiki page to get more information.
+
+{{footer}}
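For reference, a minimal sketch of how these .j2 bodies are rendered (the context values are invented; the template path matches the new templates directory wired up in MailMessage):

from jinja2 import Environment, FileSystemLoader

env_loader = Environment(loader=FileSystemLoader('cloud_governance/common/mails/templates'))
template = env_loader.get_template('cro_monitor_budget_remain_alert.j2')
body = template.render({'name': 'Jane Doe', 'ticket_id': '123', 'budget': 1000,
                        'used_budget': 800, 'remain_budget': 200,
                        'portal': 'https://portal.example.com', 'footer': ''})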
diff --git a/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2 b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2
new file mode 100644
index 00000000..06cf2176
--- /dev/null
+++ b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2
@@ -0,0 +1,11 @@
+Hi {{name}},
+
+You have used {{used_budget}} / {{budget}} of the budget as of today for TicketId: {{ticket_id}}.
+You have exceeded the budget by {{-1*remain_budget}}.
+
+Open a budget extension request if more budget is needed.
+You can extend the budget here.
+Visit the wiki page to get more information.
+
    +{{footer}} diff --git a/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2 b/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2 new file mode 100644 index 00000000..bfc54109 --- /dev/null +++ b/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2 @@ -0,0 +1,18 @@ +
Hi {{manager_full_name}},
+{% if extra_message %}
+{{extra_message}}
+{% endif %}
+
+{{user_full_name}} is waiting for your approval of a project cloud budget request.
+Please approve the request at the following url: {{portal}}
+
+TicketId: {{ticket_id}}
+
+Description of the New Request:
+{% for key, value in description.items() %}
+{{key}}: {{value}}
+{% endfor %}
+
+If you are not able to approve from the site, please click here to send an approval by mail: Approve request or Reject Request.
+Note: It is highly recommended to use the site for approving requests.
    +{{footer}} diff --git a/cloud_governance/common/pandas/__init__.py b/cloud_governance/common/pandas/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/common/pandas/pandas_operations.py b/cloud_governance/common/pandas/pandas_operations.py new file mode 100644 index 00000000..1559b267 --- /dev/null +++ b/cloud_governance/common/pandas/pandas_operations.py @@ -0,0 +1,55 @@ +import tempfile + +import pandas as pd +import typeguard + +from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp + + +class PandasOperations: + """ + This class performs the pandas operations + """ + CHUNK_SIZE = 5000 + + def __init__(self, region_name: str = 'us-east-1'): + self.__s3_operations = S3Operations(region_name=region_name) + + @typeguard.typechecked + @logger_time_stamp + def get_dataframe_from_csv_file(self, file_path: str): + """ + This method returns the pandas dataframe from the csv file + :param file_path: + :return: + """ + dataframes = [] + for data_chunk in pd.read_csv(filepath_or_buffer=file_path, chunksize=self.CHUNK_SIZE): + dataframes.append(data_chunk) + dataframe = pd.concat(dataframes, ignore_index=True) + return dataframe + + @typeguard.typechecked + @logger_time_stamp + def get_dataframe_from_s3_file(self, bucket: str, key: str, download_file: str): + """ + This method returns the pandas dataframe from the s3 file + :return: + """ + if not self.__s3_operations.file_exist(bucket=bucket, key=key, file_name=download_file): + raise FileNotFoundError(f"{key}/{download_file} path is not exists else verify your credentials") + with tempfile.NamedTemporaryFile(suffix='.csv', mode='w') as file_name: + self.__s3_operations.download_file(bucket=bucket, key=key, download_file=download_file, + file_name_path=file_name.name) + return self.get_dataframe_from_csv_file(file_path=file_name.name) + + @typeguard.typechecked + @logger_time_stamp + def get_data_dictonary_from_dataframe(self, dataframe: pd.DataFrame): + """ + This method returns the dataframe format to dictonary order + :param dataframe: + :return: + """ + return dataframe.to_dict(orient='records') diff --git a/cloud_governance/main/environment_variables.py b/cloud_governance/main/environment_variables.py index 6564f5e8..a50d8ef6 100644 --- a/cloud_governance/main/environment_variables.py +++ b/cloud_governance/main/environment_variables.py @@ -1,6 +1,12 @@ +import argparse import os -from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations +import tempfile +from ast import literal_eval + +import boto3 + +from cloud_governance.main.environment_variables_exceptions import ParseFailed class EnvironmentVariables: @@ -31,24 +37,30 @@ def __init__(self): ################################################################################################## # dynamic parameters - configure for local run # parameters for running policies - self._environment_variables_dict['account'] = EnvironmentVariables.get_env('account', '').upper() + self._environment_variables_dict['account'] = EnvironmentVariables.get_env('account', '').upper().strip() self._environment_variables_dict['AWS_DEFAULT_REGION'] = EnvironmentVariables.get_env('AWS_DEFAULT_REGION', '') - + self._environment_variables_dict['log_level'] = EnvironmentVariables.get_env('log_level', 'INFO') + self._environment_variables_dict['PRINT_LOGS'] = EnvironmentVariables.get_boolean_from_environment('PRINT_LOGS', True) + if not 
self._environment_variables_dict['AWS_DEFAULT_REGION']: + self._environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2' + self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = EnvironmentVariables.get_env('PUBLIC_CLOUD_NAME', 'AWS') if EnvironmentVariables.get_env('AWS_ACCESS_KEY_ID', '') and EnvironmentVariables.get_env('AWS_SECRET_ACCESS_KEY', ''): - self.iam_operations = IAMOperations() - self._environment_variables_dict['account'] = self.iam_operations.get_account_alias_cloud_name()[0].upper() - + self._environment_variables_dict['account'] = self.get_aws_account_alias_name().upper().replace('OPENSHIFT-', '') self._environment_variables_dict['policy'] = EnvironmentVariables.get_env('policy', '') self._environment_variables_dict['aws_non_cluster_policies'] = ['ec2_idle', 'ec2_stop', 'ec2_run', 'ebs_in_use', 'ebs_unattached', 's3_inactive', 'empty_roles', 'ip_unattached', - 'nat_gateway_unused', + 'unused_nat_gateway', 'zombie_snapshots', 'skipped_resources', 'monthly_report'] - self._environment_variables_dict['cost_policies'] = ['cost_explorer', 'cost_over_usage', 'cost_billing_reports', 'cost_explorer_payer_billings'] + es_index = 'cloud-governance-policy-es-index' + self._environment_variables_dict['cost_policies'] = ['cost_explorer', 'cost_over_usage', 'cost_billing_reports', + 'cost_explorer_payer_billings', 'spot_savings_analysis'] self._environment_variables_dict['ibm_policies'] = ['tag_baremetal', 'tag_vm', 'ibm_cost_report', 'ibm_cost_over_usage'] + if self._environment_variables_dict['policy'] in self._environment_variables_dict['cost_policies']: + es_index = 'cloud-governance-global-cost-billing-index' # AWS env vars self._environment_variables_dict['resource_name'] = EnvironmentVariables.get_env('resource_name', '') @@ -64,6 +76,7 @@ def __init__(self): self._environment_variables_dict['service_type'] = EnvironmentVariables.get_env('service_type', '') self._environment_variables_dict['TABLE_NAME'] = EnvironmentVariables.get_env('TABLE_NAME', '') self._environment_variables_dict['REPLACE_ACCOUNT_NAME'] = EnvironmentVariables.get_env('REPLACE_ACCOUNT_NAME', '{}') + self._environment_variables_dict['DAYS_TO_DELETE_RESOURCE'] = int(EnvironmentVariables.get_env('DAYS_TO_DELETE_RESOURCE', '7')) # AWS Cost Explorer tags self._environment_variables_dict['cost_metric'] = EnvironmentVariables.get_env('cost_metric', 'UnblendedCost') @@ -71,6 +84,7 @@ def __init__(self): self._environment_variables_dict['end_date'] = EnvironmentVariables.get_env('end_date', '') self._environment_variables_dict['granularity'] = EnvironmentVariables.get_env('granularity', 'DAILY') self._environment_variables_dict['cost_explorer_tags'] = EnvironmentVariables.get_env('cost_explorer_tags', '{}') + self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = EnvironmentVariables.get_env('PUBLIC_CLOUD_NAME', 'AWS') # AZURE Credentials self._environment_variables_dict['AZURE_ACCOUNT_ID'] = EnvironmentVariables.get_env('AZURE_ACCOUNT_ID', '') @@ -80,7 +94,7 @@ def __init__(self): if self._environment_variables_dict['AZURE_CLIENT_ID'] and self._environment_variables_dict['AZURE_TENANT_ID']\ and self._environment_variables_dict['AZURE_CLIENT_SECRET']: self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = 'AZURE' - self._environment_variables_dict['TOTAL_ACCOUNTS'] = bool(EnvironmentVariables.get_env('TOTAL_ACCOUNTS', '')) + self._environment_variables_dict['TOTAL_ACCOUNTS'] = EnvironmentVariables.get_boolean_from_environment('TOTAL_ACCOUNTS', False) # IBM env vars 
self._environment_variables_dict['IBM_ACCOUNT_ID'] = EnvironmentVariables.get_env('IBM_ACCOUNT_ID', '') @@ -98,7 +112,7 @@ def __init__(self): # Common env vars self._environment_variables_dict['dry_run'] = EnvironmentVariables.get_env('dry_run', 'yes') - self._environment_variables_dict['FORCE_DELETE'] = EnvironmentVariables.get_env('FORCE_DELETE', False) + self._environment_variables_dict['FORCE_DELETE'] = EnvironmentVariables.get_boolean_from_environment('FORCE_DELETE', False) self._environment_variables_dict['policy_output'] = EnvironmentVariables.get_env('policy_output', '') self._environment_variables_dict['bucket'] = EnvironmentVariables.get_env('bucket', '') self._environment_variables_dict['file_path'] = EnvironmentVariables.get_env('file_path', '') @@ -109,7 +123,7 @@ def __init__(self): self._environment_variables_dict['upload_data_es'] = EnvironmentVariables.get_env('upload_data_es', '') self._environment_variables_dict['es_host'] = EnvironmentVariables.get_env('es_host', '') self._environment_variables_dict['es_port'] = EnvironmentVariables.get_env('es_port', '') - self._environment_variables_dict['es_index'] = EnvironmentVariables.get_env('es_index', '') + self._environment_variables_dict['es_index'] = EnvironmentVariables.get_env('es_index', es_index) self._environment_variables_dict['es_doc_type'] = EnvironmentVariables.get_env('es_doc_type', '') self._environment_variables_dict['ES_TIMEOUT'] = EnvironmentVariables.get_env('ES_TIMEOUT', 2000) @@ -137,6 +151,8 @@ def __init__(self): # AWS Top Acconut self._environment_variables_dict['AWS_ACCOUNT_ROLE'] = EnvironmentVariables.get_env('AWS_ACCOUNT_ROLE', '') + self._environment_variables_dict['PAYER_SUPPORT_FEE_CREDIT'] = EnvironmentVariables.get_env('PAYER_SUPPORT_FEE_CREDIT', 0) + self._environment_variables_dict['TEMPORARY_DIR'] = EnvironmentVariables.get_env('TEMPORARY_DIR', '/tmp') self._environment_variables_dict['COST_CENTER_OWNER'] = EnvironmentVariables.get_env('COST_CENTER_OWNER', '{}') # Jira env parameters @@ -147,13 +163,112 @@ def __init__(self): self._environment_variables_dict['JIRA_PASSWORD'] = EnvironmentVariables.get_env('JIRA_PASSWORD', '') # Cloud Resource Orchestration + self._environment_variables_dict['CRO_PORTAL'] = EnvironmentVariables.get_env('CRO_PORTAL', '') self._environment_variables_dict['CLOUD_NAME'] = EnvironmentVariables.get_env('CLOUD_NAME', '') self._environment_variables_dict['MONITOR'] = EnvironmentVariables.get_env('MONITOR', '') - self._environment_variables_dict['MANAGEMENT'] = bool(EnvironmentVariables.get_env('MANAGEMENT', False)) + self._environment_variables_dict['MANAGEMENT'] = EnvironmentVariables.get_boolean_from_environment('MANAGEMENT', False) + + # GCP Account + self._environment_variables_dict['GCP_DATABASE_NAME'] = EnvironmentVariables.get_env('GCP_DATABASE_NAME') + self._environment_variables_dict['GCP_DATABASE_TABLE_NAME'] = EnvironmentVariables.get_env('GCP_DATABASE_TABLE_NAME') + if self._environment_variables_dict.get('GCP_DATABASE_TABLE_NAME'): + self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = 'GCP' + + self._environment_variables_dict['EMAIL_ALERT'] = EnvironmentVariables.get_boolean_from_environment('EMAIL_ALERT', True) + self._environment_variables_dict['MANAGER_EMAIL_ALERT'] = EnvironmentVariables.get_boolean_from_environment('MANAGER_EMAIL_ALERT', True) + self._environment_variables_dict['UPDATE_TAG_BULKS'] = int(EnvironmentVariables.get_env('UPDATE_TAG_BULKS', '20')) + + # policies aggregate alert + self._environment_variables_dict['BUCKET_NAME'] = 
EnvironmentVariables.get_env('BUCKET_NAME') + self._environment_variables_dict['BUCKET_KEY'] = EnvironmentVariables.get_env('BUCKET_KEY') + self._environment_variables_dict['MAIL_ALERT_DAYS'] = literal_eval(EnvironmentVariables.get_env('MAIL_ALERT_DAYS', '[]')) + self._environment_variables_dict['POLICY_ACTIONS_DAYS'] = literal_eval(EnvironmentVariables.get_env('POLICY_ACTIONS_DAYS', '[]')) + self._environment_variables_dict['DEFAULT_ADMINS'] = literal_eval(EnvironmentVariables.get_env('DEFAULT_ADMINS', '[]')) + self._environment_variables_dict['KERBEROS_USERS'] = literal_eval(EnvironmentVariables.get_env('KERBEROS_USERS', '[]')) + self._environment_variables_dict['POLICIES_TO_ALERT'] = literal_eval(EnvironmentVariables.get_env('POLICIES_TO_ALERT', '[]')) + if self._environment_variables_dict.get('policy') in ['send_aggregated_alerts']: + self._environment_variables_dict['COMMON_POLICIES'] = True + # CRO -- Cloud Resource Orch + self._environment_variables_dict['CLOUD_RESOURCE_ORCHESTRATION'] = EnvironmentVariables.get_boolean_from_environment('CLOUD_RESOURCE_ORCHESTRATION', False) + self._environment_variables_dict['USER_COST_INDEX'] = EnvironmentVariables.get_env('USER_COST_INDEX', '') + self._environment_variables_dict['CRO_ES_INDEX'] = EnvironmentVariables.get_env('CRO_ES_INDEX', 'cloud-governance-resource-orchestration') + self._environment_variables_dict['CRO_COST_OVER_USAGE'] = int(EnvironmentVariables.get_env('CRO_COST_OVER_USAGE', '500')) + self._environment_variables_dict['CRO_DEFAULT_ADMINS'] = literal_eval(EnvironmentVariables.get_env('CRO_DEFAULT_ADMINS', "[]")) + self._environment_variables_dict['CRO_DURATION_DAYS'] = int(EnvironmentVariables.get_env('CRO_DURATION_DAYS', '30')) + self._environment_variables_dict['RUN_ACTIVE_REGIONS'] = EnvironmentVariables.get_boolean_from_environment('RUN_ACTIVE_REGIONS', False) + self._environment_variables_dict['CRO_RESOURCE_TAG_NAME'] = EnvironmentVariables.get_env('CRO_RESOURCE_TAG_NAME', 'TicketId') + self._environment_variables_dict['CRO_REPLACED_USERNAMES'] = literal_eval(EnvironmentVariables.get_env('CRO_REPLACED_USERNAMES', "['osdCcsAdmin']")) + self._environment_variables_dict['CE_PAYER_INDEX'] = EnvironmentVariables.get_env('CE_PAYER_INDEX', '') + self._environment_variables_dict['EMAIL_TO'] = EnvironmentVariables.get_env('EMAIL_TO', '') + self._environment_variables_dict['EMAIL_CC'] = literal_eval(EnvironmentVariables.get_env('EMAIL_CC', "[]")) + self._environment_variables_dict['MANAGER_ESCALATION_DAYS'] = int(EnvironmentVariables.get_env('MANAGER_ESCALATION_DAYS', '3')) + self._environment_variables_dict['GLOBAL_CLOUD_ADMIN'] = EnvironmentVariables.get_env('GLOBAL_CLOUD_ADMIN', 'natashba') + self._environment_variables_dict['TICKET_OVER_USAGE_LIMIT'] = int(EnvironmentVariables.get_env('TICKET_OVER_USAGE_LIMIT', '80')) + + # AWS Athena + self._environment_variables_dict['S3_RESULTS_PATH'] = EnvironmentVariables.get_env('S3_RESULTS_PATH', '') + self._environment_variables_dict['DEFAULT_ROUND_DIGITS'] = \ + int(EnvironmentVariables.get_env('DEFAULT_ROUND_DIGITS', '3')) + self._environment_variables_dict['ATHENA_DATABASE_NAME'] = EnvironmentVariables.get_env('ATHENA_DATABASE_NAME', '') + self._environment_variables_dict['ATHENA_TABLE_NAME'] = EnvironmentVariables.get_env('ATHENA_TABLE_NAME', '') + + + @staticmethod - def get_env(var: str, defval: any = ''): - return os.environ.get(var, defval) + def to_bool(arg, def_val: bool = None): + if isinstance(arg, bool): + return arg + if isinstance(arg, (int, float)): + return arg != 0 + 
+        if isinstance(arg, str):
+            arg = arg.lower()
+            if arg == 'true' or arg == 'yes':
+                return True
+            elif arg == 'false' or arg == 'no':
+                return False
+            try:
+                arg1 = int(arg)
+                return arg1 != 0
+            except Exception:
+                pass
+        if def_val is not None:
+            return def_val
+        raise ParseFailed(f'Cannot parse {arg} as a boolean value')
+
+    def get_aws_account_alias_name(self):
+        """
+        This method returns the aws account alias name
+        :return:
+        """
+        iam_client = boto3.client('iam')
+        try:
+            account_alias = iam_client.list_account_aliases()['AccountAliases']
+            if account_alias:
+                return account_alias[0].upper()
+        except Exception:
+            return os.environ.get('account', '').upper()
+
+    @staticmethod
+    def get_env(var: str, defval=''):
+        lcvar = var.lower()
+        dashvar = lcvar.replace('_', '-')
+        parser = argparse.ArgumentParser(description='Run CloudGovernance', allow_abbrev=False)
+        if lcvar == dashvar:
+            parser.add_argument(f"--{lcvar}", default=os.environ.get(var, defval), type=str, metavar='String', help=var)
+        else:
+            parser.add_argument(f"--{lcvar}", f"--{dashvar}", default=os.environ.get(var, defval), type=str,
+                                metavar='String', help=var)
+        args, ignore = parser.parse_known_args()
+        if hasattr(args, lcvar):
+            return getattr(args, lcvar)
+        else:
+            return os.environ.get(var, defval)
+
+    @staticmethod
+    def get_boolean_from_environment(var: str, defval: bool):
+        return EnvironmentVariables.to_bool(EnvironmentVariables.get_env(var), defval)

     @property
     def environment_variables_dict(self):
@@ -164,7 +279,6 @@ def environment_variables_dict(self):

 environment_variables = EnvironmentVariables()
-
 # env vars examples
 # os.environ['AWS_DEFAULT_REGION'] = 'us-east-2'
 # os.environ['AWS_DEFAULT_REGION'] = 'all'
diff --git a/cloud_governance/main/main.py b/cloud_governance/main/main.py
index 61b92c2d..ca5bd962 100644
--- a/cloud_governance/main/main.py
+++ b/cloud_governance/main/main.py
@@ -4,6 +4,8 @@
 import boto3  # regions
 from cloud_governance.cloud_resource_orchestration.monitor.cloud_monitor import CloudMonitor
+from cloud_governance.main.main_common_operations import run_common_policies
+from cloud_governance.main.run_cloud_resource_orchestration import run_cloud_resource_orchestration
 from cloud_governance.policy.policy_operations.aws.cost_expenditure.cost_report_policies import CostReportPolicies
 from cloud_governance.policy.policy_operations.azure.azure_policy_runner import AzurePolicyRunner
 from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp, logger
@@ -13,6 +15,7 @@
     remove_tag_non_cluster_resource, tag_na_resources
 from cloud_governance.policy.policy_operations.aws.tag_user.run_tag_iam_user import tag_iam_user, run_validate_iam_user_tags
 from cloud_governance.policy.policy_operations.aws.zombie_cluster.run_zombie_cluster_resources import zombie_cluster_resource
+from cloud_governance.policy.policy_operations.gcp.gcp_policy_runner import GcpPolicyRunner
 from cloud_governance.policy.policy_operations.gitleaks.gitleaks import GitLeaks
 from cloud_governance.policy.policy_operations.ibm.ibm_operations.ibm_policy_runner import IBMPolicyRunner
 from cloud_governance.main.environment_variables import environment_variables
@@ -167,12 +170,6 @@ def run_policy(account: str, policy: str, region: str, dry_run: str):
         raise Exception(f'Missing Policy name: {policy}')

-@logger_time_stamp
-def run_cloud_management():
-    """This method run the cloud management"""
-    return CloudMonitor().run()
-
-
 @logger_time_stamp
 def main():
     """
@@ -194,110 +191,124 @@ def main():
     es_doc_type =
environment_variables_dict.get('es_doc_type', '') bucket = environment_variables_dict.get('bucket', '') - non_cluster_polices_runner = None - is_non_cluster_polices_runner = policy in environment_variables_dict.get('aws_non_cluster_policies') - if is_non_cluster_polices_runner: - non_cluster_polices_runner = ZombieNonClusterPolicies() + if environment_variables_dict.get('COMMON_POLICIES'): + run_common_policies() + elif environment_variables_dict.get('CLOUD_RESOURCE_ORCHESTRATION'): + run_cloud_resource_orchestration() + else: + non_cluster_polices_runner = None + is_non_cluster_polices_runner = policy in environment_variables_dict.get('aws_non_cluster_policies') + if is_non_cluster_polices_runner: + non_cluster_polices_runner = ZombieNonClusterPolicies() - ibm_classic_infrastructure_policy_runner = None - is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('ibm_policies') - if not is_tag_ibm_classic_infrastructure_runner: - if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'IBM': - is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('cost_policies') - if is_tag_ibm_classic_infrastructure_runner: - ibm_classic_infrastructure_policy_runner = IBMPolicyRunner() + ibm_classic_infrastructure_policy_runner = None + is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('ibm_policies') + if not is_tag_ibm_classic_infrastructure_runner: + if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'IBM': + is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('cost_policies') + if is_tag_ibm_classic_infrastructure_runner: + ibm_classic_infrastructure_policy_runner = IBMPolicyRunner() - is_cost_explorer_policies_runner = '' - if not environment_variables_dict.get('PUBLIC_CLOUD_NAME'): - cost_explorer_policies_runner = None - is_cost_explorer_policies_runner = policy in environment_variables_dict.get('cost_policies') - if is_cost_explorer_policies_runner: - cost_explorer_policies_runner = CostReportPolicies() + is_cost_explorer_policies_runner = '' + if environment_variables_dict.get('PUBLIC_CLOUD_NAME') == 'AWS': + cost_explorer_policies_runner = None + is_cost_explorer_policies_runner = policy in environment_variables_dict.get('cost_policies') + if is_cost_explorer_policies_runner: + cost_explorer_policies_runner = CostReportPolicies() - is_azure_policy_runner = '' - if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'AZURE': - azure_cost_policy_runner = None - is_azure_policy_runner = policy in environment_variables_dict.get('cost_policies') - if is_azure_policy_runner: - azure_cost_policy_runner = AzurePolicyRunner() + is_azure_policy_runner = '' + if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'AZURE': + azure_cost_policy_runner = None + is_azure_policy_runner = policy in environment_variables_dict.get('cost_policies') + if is_azure_policy_runner: + azure_cost_policy_runner = AzurePolicyRunner() - # cloud_resource_orchestration lon_run/short_run - is_cloud_management = False - if environment_variables_dict.get('MANAGEMENT'): - is_cloud_management = True + is_gcp_policy_runner = '' + if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() 
== 'GCP': + gcp_cost_policy_runner = None + is_gcp_policy_runner = policy in environment_variables_dict.get('cost_policies') + if is_gcp_policy_runner: + gcp_cost_policy_runner = GcpPolicyRunner() - @logger_time_stamp - def run_non_cluster_polices_runner(): - """ - This method run the aws non-cluster policies - @return: - """ - non_cluster_polices_runner.run() + @logger_time_stamp + def run_non_cluster_polices_runner(): + """ + This method run the aws non-cluster policies + @return: + """ + non_cluster_polices_runner.run() - def run_tag_ibm_classic_infrastructure_runner(): - """ - This method run the IBM policies - @return: - """ - ibm_classic_infrastructure_policy_runner.run() + def run_tag_ibm_classic_infrastructure_runner(): + """ + This method run the IBM policies + @return: + """ + ibm_classic_infrastructure_policy_runner.run() - @logger_time_stamp - def run_cost_explorer_policies_runner(): - """ - This method run the aws cost_explorer policies - @return: - """ - cost_explorer_policies_runner.run() + @logger_time_stamp + def run_cost_explorer_policies_runner(): + """ + This method run the aws cost_explorer policies + @return: + """ + cost_explorer_policies_runner.run() - @logger_time_stamp - def run_azure_policy_runner(): - """ - This method run the azure policies - @return: - """ - azure_cost_policy_runner.run() + @logger_time_stamp + def run_azure_policy_runner(): + """ + This method run the azure policies + @return: + """ + azure_cost_policy_runner.run() - # 1. ELK Uploader - if upload_data_es: - input_data = {'es_host': es_host, - 'es_port': int(es_port), - 'es_index': es_index, - 'es_doc_type': es_doc_type, - 'es_add_items': {'account': account}, - 'bucket': bucket, - 'logs_bucket_key': 'logs', - 's3_file_name': 'resources.json', - 'region': region_env, - 'policy': policy, - } - elk_uploader = ESUploader(**input_data) - elk_uploader.upload_to_es(account=account) - # 2. POLICY - elif is_non_cluster_polices_runner: - run_non_cluster_polices_runner() - elif is_tag_ibm_classic_infrastructure_runner: - run_tag_ibm_classic_infrastructure_runner() - elif is_cost_explorer_policies_runner: - run_cost_explorer_policies_runner() - elif is_azure_policy_runner: - run_azure_policy_runner() - elif is_cloud_management: - run_cloud_management() - else: - if not policy: - logger.exception(f'Missing Policy name: "{policy}"') - raise Exception(f'Missing Policy name: "{policy}"') - if region_env == 'all': - # must be set for boto3 client default region - # environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2' - ec2 = boto3.client('ec2') - regions_data = ec2.describe_regions() - for region in regions_data['Regions']: - # logger.info(f"region: {region['RegionName']}") - environment_variables_dict['AWS_DEFAULT_REGION'] = region['RegionName'] - run_policy(account=account, policy=policy, region=region['RegionName'], dry_run=dry_run) + @logger_time_stamp + def run_gcp_policy_runner(): + """ + This method run the gcp policies + """ + gcp_cost_policy_runner.run() + + # 1. ELK Uploader + if upload_data_es: + input_data = {'es_host': es_host, + 'es_port': int(es_port), + 'es_index': es_index, + 'es_doc_type': es_doc_type, + 'es_add_items': {'account': account}, + 'bucket': bucket, + 'logs_bucket_key': 'logs', + 's3_file_name': 'resources.json', + 'region': region_env, + 'policy': policy, + } + elk_uploader = ESUploader(**input_data) + elk_uploader.upload_to_es(account=account) + # 2. 
POLICY + elif is_non_cluster_polices_runner: + run_non_cluster_polices_runner() + elif is_tag_ibm_classic_infrastructure_runner: + run_tag_ibm_classic_infrastructure_runner() + elif is_cost_explorer_policies_runner: + run_cost_explorer_policies_runner() + elif is_azure_policy_runner: + run_azure_policy_runner() + elif is_gcp_policy_runner: + run_gcp_policy_runner() else: - if not policy: - logger.exception(f'Missing Policy name: "{policy}"') - raise Exception(f'Missing Policy name: "{policy}"') - if region_env == 'all': - # must be set for boto3 client default region - # environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2' - ec2 = boto3.client('ec2') - regions_data = ec2.describe_regions() - for region in regions_data['Regions']: - # logger.info(f"region: {region['RegionName']}") - environment_variables_dict['AWS_DEFAULT_REGION'] = region['RegionName'] - run_policy(account=account, policy=policy, region=region['RegionName'], dry_run=dry_run) + else: + if not policy: + logger.exception(f'Missing Policy name: "{policy}"') + raise Exception(f'Missing Policy name: "{policy}"') + if region_env == 'all': + # must be set for boto3 client default region + # environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2' + ec2 = boto3.client('ec2') + regions_data = ec2.describe_regions() + for region in regions_data['Regions']: + # logger.info(f"region: {region['RegionName']}") + environment_variables_dict['AWS_DEFAULT_REGION'] = region['RegionName'] + run_policy(account=account, policy=policy, region=region['RegionName'], dry_run=dry_run) + else: + run_policy(account=account, policy=policy, region=region_env, dry_run=dry_run) main() diff --git a/cloud_governance/main/main_common_operations.py b/cloud_governance/main/main_common_operations.py new file mode 100644 index 00000000..eb6a2437 --- /dev/null +++ b/cloud_governance/main/main_common_operations.py @@ -0,0 +1,12 @@ +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.policy.policy_runners.common_policy_runner import CommonPolicyRunner + + +@logger_time_stamp +def run_common_policies(): + """ + This method runs the common policies + :return: + """ + common_policy_runner = CommonPolicyRunner() + common_policy_runner.run() diff --git a/cloud_governance/main/run_cloud_resource_orchestration.py b/cloud_governance/main/run_cloud_resource_orchestration.py new file mode 100644 index 00000000..268b0479 --- /dev/null +++ b/cloud_governance/main/run_cloud_resource_orchestration.py @@ -0,0 +1,19 @@ +from tempfile import TemporaryDirectory + +from cloud_governance.cloud_resource_orchestration.monitor.cloud_monitor import CloudMonitor +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + + +@logger_time_stamp +def run_cloud_management(): + """This method runs the cloud management""" + environment_variables_dict = environment_variables.environment_variables_dict + with TemporaryDirectory() as cache_temp_dir: + environment_variables_dict['TEMPORARY_DIRECTORY'] = cache_temp_dir + environment_variables_dict['policy'] = 'cloud_resource_orchestration' + return CloudMonitor().run() + + +def run_cloud_resource_orchestration(): + run_cloud_management() diff --git a/cloud_governance/policy/aws/cost_billing_reports.py b/cloud_governance/policy/aws/cost_billing_reports.py index ee15ae6b..a2acb789 100644 --- a/cloud_governance/policy/aws/cost_billing_reports.py +++ b/cloud_governance/policy/aws/cost_billing_reports.py @@ -35,13 +35,13 @@ def __init__(self): self.__gsheet_id = self._environment_variables_dict.get('SPREADSHEET_ID', '') self.gdrive_operations = GoogleDriveOperations() self.update_to_gsheet = UploadToGsheet() - self.cost_center, self.__account_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__account_id) + self.cost_center, self.__account_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__account_id)
except: pass def __get_start_date(self, end_date: datetime, days: int, operation: operator) -> datetime: """ - This method return start_date + This method returns start_date @param operation: @param end_date: @param days: diff --git a/cloud_governance/policy/aws/cost_explorer.py b/cloud_governance/policy/aws/cost_explorer.py index 21426c2a..f1313dd4 100644 --- a/cloud_governance/policy/aws/cost_explorer.py +++ b/cloud_governance/policy/aws/cost_explorer.py @@ -1,4 +1,3 @@ - from ast import literal_eval from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations @@ -16,6 +15,18 @@ class CostExplorer: """ BULK_UPLOAD_THREADS = 8 + SAVINGS_PLAN_FILTER = { # savings plan usage for ce filter + 'Dimensions': { + 'Key': 'RECORD_TYPE', + 'Values': ['SavingsPlanRecurringFee', 'SavingsPlanNegation', 'SavingsPlanCoveredUsage'] + } + } + EXCLUDE = 'exclude' + INCLUDE = 'include' + CE_KEY_RESULTS_BY_TIME = 'ResultsByTime' + INDEX_ID = 'index_id' + APPEND = 'a' + CE_OPERATION = 'Not' def __init__(self): super().__init__() @@ -32,11 +43,12 @@ def __init__(self): self._elastic_upload = ElasticUpload() self.__account = self.__environment_variables_dict.get('account').upper().replace('OPENSHIFT-', "").strip() - def filter_data_by_tag(self, groups: dict, tag: str): + def filter_data_by_tag(self, groups: dict, tag: str, savings_plan: str): """ This method extract data by tag @param tag: @param groups: Data from the cloud explorer + @param savings_plan: @return: converted into dict format """ data = {} @@ -58,13 +70,14 @@ def filter_data_by_tag(self, groups: dict, tag: str): else: if 'vm_import_image' in name: name = 'vm_import_image' - index_id = f'{start_time.lower()}-{account.lower()}-{tag.lower()}-{name.lower()}' + index_id = f'{start_time}-{account}-{tag}-savings-{savings_plan}-{name}'.lower() if index_id not in data: - upload_data = {tag: name if tag.upper() == 'ChargeType'.upper() else name.upper(), - 'Cost': round(float(amount), 3), 'index_id': index_id, 'timestamp': start_time} + upload_data = {tag: name if tag.upper() in list(self.__cost_explorer.CE_COST_TYPES) else name.upper(), + 'Cost': round(float(amount), 3), self.INDEX_ID: index_id, 'timestamp': start_time, 'savings_plan': savings_plan} if 'global' in self._elastic_upload.es_index: if 'Budget' not in upload_data: upload_data['Budget'] = self._elastic_upload.account + upload_data['tag'] = tag.lower() data[index_id] = upload_data else: data[index_id]['Cost'] += round(float(amount), 3) @@ -77,15 +90,19 @@ def __get_daily_cost_by_tags(self): """ data_house = {} for tag in self.cost_tags: - if self.start_date and self.end_date: - response = self.__cost_explorer.get_cost_by_tags(tag=tag, start_date=self.start_date, end_date=self.end_date, granularity=self.granularity, cost_metric=self.cost_metric) - else: - response = self.__cost_explorer.get_cost_by_tags(tag=tag, granularity=self.granularity, cost_metric=self.cost_metric) - results_by_time = response.get('ResultsByTime') - if results_by_time: - data_house[tag] = [] - for result in results_by_time: - data_house[tag].extend(self.filter_data_by_tag(result, tag)) + for savings_plan in [self.EXCLUDE, self.INCLUDE]: + filters = {} + if savings_plan == self.EXCLUDE: + filters = {self.CE_OPERATION: self.SAVINGS_PLAN_FILTER} + if self.start_date and self.end_date: + response = self.__cost_explorer.get_cost_by_tags(tag=tag, start_date=self.start_date, end_date=self.end_date, granularity=self.granularity, cost_metric=self.cost_metric, Filter=filters) + else: + response = 
self.__cost_explorer.get_cost_by_tags(tag=tag, granularity=self.granularity, cost_metric=self.cost_metric, Filter=filters) + results_by_time = response.get(self.CE_KEY_RESULTS_BY_TIME) + if results_by_time: + data_house[f'{tag}-{savings_plan}'] = [] + for result in results_by_time: + data_house[f'{tag}-{savings_plan}'].extend(self.filter_data_by_tag(result, tag, savings_plan)) return data_house @logger_time_stamp @@ -97,12 +114,12 @@ def __upload_data(self, data: list, index: str): @return: """ if self.file_name: - with open(f'/tmp/{self.file_name}', 'a') as file: + with open(f'/tmp/{self.file_name}', self.APPEND) as file: for value in data: file.write(f'{value}\n') else: for value in data: - self.upload_item_to_es(index=index, item=value, index_id=value['index_id']) + self.upload_item_to_es(index=index, item=value, index_id=value[self.INDEX_ID]) logger.info(f'Data uploaded to {index}, Total Data: {len(data)}') def upload_item_to_es(self, item: dict, index: str, index_id: str = ''): @@ -126,8 +143,8 @@ def upload_tags_cost_to_elastic_search(self): logger.info(f'Get {self.granularity} Cost usage by metric: {self.cost_metric}') cost_data = self.__get_daily_cost_by_tags() for key, values in cost_data.items(): - index = f'{self._elastic_upload.es_index}-{key.lower()}' - self.__upload_data(values, index) + logger.info(f"Uploading the data of {key} tag") + self.__upload_data(values, self._elastic_upload.es_index) def run(self): """ diff --git a/cloud_governance/policy/aws/cost_explorer_payer_billings.py b/cloud_governance/policy/aws/cost_explorer_payer_billings.py index 2c28739f..be159456 100644 --- a/cloud_governance/policy/aws/cost_explorer_payer_billings.py +++ b/cloud_governance/policy/aws/cost_explorer_payer_billings.py @@ -1,26 +1,43 @@ import copy import datetime +import logging from ast import literal_eval import boto3 + +from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations +from cloud_governance.common.clouds.aws.savingsplan.savings_plans_operations import SavingsPlansOperations from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp -from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.init_logger import logger, handler from cloud_governance.common.clouds.aws.cost_explorer.cost_explorer_operations import CostExplorerOperations from cloud_governance.policy.aws.cost_billing_reports import CostBillingReports class CostExplorerPayerBillings(CostBillingReports): - """This class is responsible for generation cost billing report for Budget, Actual, Forecast from the Org level""" + """ + This class is responsible for generating the cost billing report for Budget, Actual, Forecast from the Org level + Monthly Savings Plan Amortization: (linked_account_total_cost/payer_account_total_cost) * monthly_savings_plan_cost + Monthly_support_fee: (monthly_support_fee - (monthly_support_fee * discount ) ) * (linked_account_total_cost/payer_account_total_cost) + """ + + DEFAULT_ROUND_DIGITS = 3 def __init__(self): super().__init__() self.__aws_role = self._environment_variables_dict.get("AWS_ACCOUNT_ROLE") self.__access_key, self.__secret_key, self.__session = self.__get_sts_credentials() self.__ce_client = boto3.client('ce', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session) + self.__savings_plan_client = boto3.client('savingsplans', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session) +
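The two amortization formulas in the class docstring above are easier to verify with numbers plugged in; a minimal sketch with made-up figures, using the keys (`TotalPercentage`, `SavingsPlanCost`, `PremiumSupportFee`) that `filter_data_by_tag` populates later in this file (illustrative only, not part of the patch):

```python
# Made-up monthly totals for one linked account under a payer account.
linked_cost, payer_cost = 2_000.0, 10_000.0
monthly_savings_plan_cost, support_fee, discount = 1_500.0, 800.0, 0.1

share = linked_cost / payer_cost                                      # TotalPercentage
savings_plan_cost = share * monthly_savings_plan_cost                 # SavingsPlanCost
premium_support_fee = (support_fee - support_fee * discount) * share  # PremiumSupportFee

assert round(savings_plan_cost, 3) == 300.0
assert round(premium_support_fee, 3) == 144.0
```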
self.__iam_client = boto3.client('iam', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session) + self.__assumed_role_account_name = IAMOperations(iam_client=self.__iam_client).get_account_alias_cloud_name() self.__cost_explorer_operations = CostExplorerOperations(ce_client=self.__ce_client) - self.__cost_center_owner = literal_eval(self._environment_variables_dict.get('COST_CENTER_OWNER')) + self.__savings_plan_operations = SavingsPlansOperations(savings_plan_client=self.__savings_plan_client) self.__replacement_account = literal_eval(self._environment_variables_dict.get('REPLACE_ACCOUNT_NAME')) + self.__savings_discounts = float(self._environment_variables_dict.get('PAYER_SUPPORT_FEE_CREDIT', 0)) + self.__monthly_cost_for_spa_calc = {} + self.__monthly_cost_for_support_fee = {} + self.__temporary_dir = self._environment_variables_dict.get('TEMPORARY_DIR', '') def __get_sts_credentials(self): """This method returns the temporary credentials from the sts service""" @@ -50,23 +67,27 @@ def filter_data_by_tag(self, cost_data: dict, tag: str, cost_center: int = ''): if name and amount: if name not in data: if cost_center: - acc_cost_center, account_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=name, dir_path='/tmp') timestamp = datetime.datetime.strptime(start_time, '%Y-%m-%d') month = datetime.datetime.strftime(timestamp, "%Y %b") - owner = self.__cost_center_owner.get(str(acc_cost_center)) if self.__cost_center_owner.get(str(acc_cost_center)) else 'Others' + acc_cost_center, account_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=name, dir_path=self.__temporary_dir) + month_full_name = datetime.datetime.strftime(timestamp, "%B") + payer_monthly_savings_plan = self.update_to_gsheet.get_monthly_spa(month_name=month_full_name, dir_path=self.__temporary_dir) budget = account_budget if start_time.split('-')[0] in years else 0 index_id = f'{start_time}-{name}' - upload_data = {tag: name, 'Actual': round(float(amount), 3), 'start_date': start_time, + upload_data = {tag: name, 'Actual': round(float(amount), self.DEFAULT_ROUND_DIGITS), 'start_date': start_time, 'timestamp': timestamp, 'CloudName': 'AWS', 'Month': month, 'Forecast': 0, 'filter_date': f'{start_time}-{month.split()[-1]}', - 'Budget': round(budget / self.MONTHS, 3), 'CostCenter': cost_center, + 'Budget': round(budget / self.MONTHS, self.DEFAULT_ROUND_DIGITS), 'CostCenter': cost_center, 'AllocatedBudget': budget, - "Owner": owner + "Owner": owner, + 'SavingsPlanCost': (float(amount) / float(self.__monthly_cost_for_spa_calc.get(start_time))) * payer_monthly_savings_plan, + 'TotalPercentage': (float(amount) / float(self.__monthly_cost_for_spa_calc.get(start_time))) } + upload_data['PremiumSupportFee'] = (float(self.__monthly_cost_for_support_fee.get(start_time)) - (float(self.__monthly_cost_for_support_fee.get(start_time)) * self.__savings_discounts)) * upload_data['TotalPercentage'] else: index_id = f'{start_time}-{name}' - upload_data = {tag: name, 'Actual': round(float(amount), 3)} + upload_data = {tag: name, 'Actual': round(float(amount), self.DEFAULT_ROUND_DIGITS)} if index_id: data[index_id] = upload_data if cost_data.get('DimensionValueAttributes'): @@ -82,20 +103,28 @@ return data def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict, account_id: str, cost_center: int, account: str): -
acc_cost_center, account_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=account_id, dir_path='/tmp') - owner = self.__cost_center_owner.get(str(acc_cost_center)) if self.__cost_center_owner.get(str(acc_cost_center)) else 'Others' + acc_cost_center, account_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=account_id, dir_path=self.__temporary_dir) for cost_forecast in cost_forecast_data: start_date = str((cost_forecast.get('TimePeriod').get('Start'))) start_year = start_date.split('-')[0] - cost = round(float(cost_forecast.get('MeanValue')), 3) + cost = round(float(cost_forecast.get('MeanValue')), self.DEFAULT_ROUND_DIGITS) index = f'{start_date}-{account_id}' + month_full_name = datetime.datetime.strftime(datetime.datetime.strptime(start_date, '%Y-%m-%d'), "%B") + total_percentage = (cost / float(self.__monthly_cost_for_spa_calc.get(start_date))) + payer_monthly_savings_plan = self.update_to_gsheet.get_monthly_spa(month_name=month_full_name, dir_path=self.__temporary_dir) if index in cost_usage_data[account]: cost_usage_data[account][index]['Forecast'] = cost + cost_usage_data[account][index]['TotalPercentage'] = total_percentage + cost_usage_data[account][index]['SavingsPlanCost'] = total_percentage * payer_monthly_savings_plan + cost_usage_data[account][index]['PremiumSupportFee'] = total_percentage * (float(self.__monthly_cost_for_support_fee.get(start_date)) - (float(self.__monthly_cost_for_support_fee.get(start_date)) * self.__savings_discounts)) else: data = {} data['AccountId'] = account_id data['Actual'] = 0 data['Forecast'] = cost + data['TotalPercentage'] = total_percentage + data['SavingsPlanCost'] = total_percentage * payer_monthly_savings_plan + data['PremiumSupportFee'] = total_percentage * (float(self.__monthly_cost_for_support_fee.get(start_date)) - (float(self.__monthly_cost_for_support_fee.get(start_date)) * self.__savings_discounts)) data['Account'] = account data['start_date'] = str((cost_forecast.get('TimePeriod').get('Start'))) data['index_id'] = f"""{data['start_date']}-{data['Account'].lower()}""" @@ -103,7 +132,7 @@ def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict, data['Month'] = datetime.datetime.strftime(data['timestamp'], '%Y %b') data['Owner'] = owner if start_year in years: - data['Budget'] = round(account_budget / self.MONTHS, 3) + data['Budget'] = round(account_budget / self.MONTHS, self.DEFAULT_ROUND_DIGITS) data['AllocatedBudget'] = account_budget else: data['Budget'] = 0 @@ -113,7 +142,6 @@ def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict, data['filter_date'] = f'{data["start_date"]}-{data["Month"].split()[-1]}' cost_usage_data[account][index] = data - @logger_time_stamp def get_linked_accounts_forecast(self, linked_account_usage: dict): """ This method append the forecast to the linked accounts @@ -125,8 +153,8 @@ def get_linked_accounts_forecast(self, linked_account_usage: dict): try: cost_forecast_data = self.__cost_explorer_operations.get_cost_forecast(start_date=start_date, end_date=end_date, granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'LINKED_ACCOUNT', 'Values': [account_id]}}) self.filter_forecast_data(cost_forecast_data=cost_forecast_data['ForecastResultsByTime'], cost_center=cost_center, account=account, account_id=account_id, cost_usage_data=linked_account_usage) - except: - logger.info(f'No Data to get forecast: {account_id}: {account}') + except Exception as err: + 
logger.info(f'No Data to get forecast: {account_id}: {account}, {err}') @@ -151,9 +179,38 @@ def get_cost_centers(self): account = usage['Account'] cost_usage_data.setdefault(account, {}).update({idx: usage}) self.get_linked_accounts_forecast(linked_account_usage=cost_usage_data) + self.__get_ce_cost_usage_by_filter_tag(tag_name='spot', cost_centers=cost_centers, cost_usage_data=cost_usage_data) + handler.setLevel(logging.WARN) + # To prevent printing the **kwargs of the function when using the logger_time_stamp decorator. self.upload_data_elastic_search(linked_account_usage=cost_usage_data) + handler.setLevel(logging.INFO) return cost_usage_data + def __get_ce_cost_usage_by_filter_tag(self, cost_centers: list, tag_name: str, cost_usage_data: dict): + """ + This method returns the cost filtered by tag_name + :param cost_centers: + :param tag_name: + :return: + """ + start_date, end_date = self.get_date_ranges() + for cost_center in cost_centers: + cost_center_number = cost_center.get('CostCenter') + filter_cost_center = {'CostCategories': {'Key': 'CostCenter', 'Values': [cost_center_number]}} + values = self.__cost_explorer_operations.CE_COST_FILTERS[tag_name.upper()]['Values'] + filter_tag_value = {'Dimensions': {'Key': 'PURCHASE_TYPE', 'Values': values}} + group_by = {'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'} + cost_data = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity="MONTHLY", + GroupBy=[group_by], Filter={'And': [filter_cost_center, filter_tag_value]}) + filtered_data = self.__cost_explorer_operations.get_ce_report_filter_data(ce_response=cost_data, tag_name=tag_name) + if filtered_data: + for index_id, row in filtered_data.items(): + account = row.get('Account') + if account in self.__replacement_account: + account = self.__replacement_account[account] + if account in cost_usage_data: + cost_usage_data[account][index_id][f'{tag_name.title()}Usage'] = round(float(row.get(tag_name)), self.DEFAULT_ROUND_DIGITS) + @logger_time_stamp def upload_data_elastic_search(self, linked_account_usage: dict): """This method uploads the data to elastic search""" @@ -163,8 +220,43 @@ monthly_account_cost.append(cost) self.elastic_upload.es_upload_data(items=monthly_account_cost, set_index='index_id') + def filter_cost_details_for_sp(self, total_cost: list): + """This method filters the account total cost""" + results = {} + for row in total_cost: + start_time = row.get('TimePeriod').get('Start') + if row.get('MeanValue'): + cost = round(float(row.get('MeanValue')), self.DEFAULT_ROUND_DIGITS) + else: + cost = round(float(row.get('Total').get('UnblendedCost').get('Amount')), self.DEFAULT_ROUND_DIGITS) + results[start_time] = cost + return results + + def get_monthly_cost_details(self, start_date: datetime = None, end_date: datetime = None): + """This method lists the savings plan details""" + current_date = datetime.datetime.utcnow() + if not start_date and not end_date: + end_date = (current_date.replace(day=1) - datetime.timedelta(days=1)).date() + start_date = end_date.replace(day=1) + end_date = end_date + datetime.timedelta(days=1) + payer_cost_response = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(end_date), granularity='MONTHLY', cost_metric=self.COST_METRIC, Filter={'Not': {'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support', 'Refund',
'Credit']}}}) + payer_support_fee = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(end_date), granularity='MONTHLY', cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support']}}) + filtered_payer_cost = self.filter_cost_details_for_sp(payer_cost_response.get('ResultsByTime')) + filtered_support_fee = self.filter_cost_details_for_sp(payer_support_fee.get('ResultsByTime')) + self.__monthly_cost_for_spa_calc = filtered_payer_cost + self.__monthly_cost_for_support_fee.update(filtered_support_fee) + start_date = current_date.date() + end_date = start_date + datetime.timedelta(days=360) + forecast_response = self.__cost_explorer_operations.get_cost_forecast(start_date=str(start_date), end_date=str(end_date), granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Not': {'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support', 'Refund', 'Credit']}}}) + payer_forecast_support_fee = self.__cost_explorer_operations.get_cost_forecast(start_date=str(start_date), end_date=str(end_date), granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support']}}) + filtered_payer_forecast = self.filter_cost_details_for_sp(forecast_response.get('ForecastResultsByTime')) + filtered_payer_support_forecast = self.filter_cost_details_for_sp(payer_forecast_support_fee.get('ForecastResultsByTime')) + self.__monthly_cost_for_spa_calc.update(filtered_payer_forecast) + self.__monthly_cost_for_support_fee.update(filtered_payer_support_forecast) + def run(self): """ This method run the methods """ + self.get_monthly_cost_details() self.get_linked_accounts_usage() diff --git a/cloud_governance/policy/aws/cost_over_usage.py b/cloud_governance/policy/aws/cost_over_usage.py index e28b1930..616367f7 100644 --- a/cloud_governance/policy/aws/cost_over_usage.py +++ b/cloud_governance/policy/aws/cost_over_usage.py @@ -28,7 +28,7 @@ def __init__(self): def get_user_used_instances(self, user_used_list: list): """ - This method return user used instances group by region + This method returns user used instances group by region @return: """ region_resources = {} diff --git a/cloud_governance/policy/aws/ebs_unattached.py b/cloud_governance/policy/aws/ebs_unattached.py index e882cb65..2470918c 100644 --- a/cloud_governance/policy/aws/ebs_unattached.py +++ b/cloud_governance/policy/aws/ebs_unattached.py @@ -52,9 +52,10 @@ def __delete_ebs_unattached(self): days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, extra_purse=ebs_cost, delta_cost=delta_cost) if unattached_volumes: - unattached_volumes_data.append([volume.get('VolumeId'), - self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Name'), - self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='User'), - str(last_detached_days), - self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Policy')]) + unattached_volumes_data.append({'ResourceId': volume.get('VolumeId'), + 'Name': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Name'), + 'User': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='User'), + 'Days': str(last_detached_days), + 'Skip': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Policy') + }) return unattached_volumes_data diff --git a/cloud_governance/policy/aws/ec2_idle.py b/cloud_governance/policy/aws/ec2_idle.py index 947086db..d78119dc 100644 --- a/cloud_governance/policy/aws/ec2_idle.py +++ 
b/cloud_governance/policy/aws/ec2_idle.py @@ -116,7 +116,7 @@ def __get_metrics_from_cloud_watch(self, instance_id: str, instance_period: int) def __get_proposed_metrics(self, metrics: list, metric_period: int): """ - This method return the metrics + This method returns the metrics @param metrics: @param metric_period: @return: @@ -125,7 +125,7 @@ def __get_proposed_metrics(self, metrics: list, metric_period: int): def __get_time_difference(self, launch_time: datetime): """ - This method return the difference of datetime + This method returns the difference of datetime @param launch_time: @return: """ diff --git a/cloud_governance/policy/aws/ec2_stop.py b/cloud_governance/policy/aws/ec2_stop.py index bc558717..6f537898 100644 --- a/cloud_governance/policy/aws/ec2_stop.py +++ b/cloud_governance/policy/aws/ec2_stop.py @@ -71,10 +71,15 @@ def __fetch_stop_instance(self, instance_days: int, delete_instance_days: int, s stopped_instance_tags[instance_id] = resource.get('Tags') ec2_types[instance_id] = resource.get('InstanceType') block_device_mappings[instance_id] = resource.get('BlockDeviceMappings') - stopped_instances.append([resource.get('InstanceId'), self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Name'), - self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='User'), str(resource.get('LaunchTime')), - self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Policy') - ]) + stopped_instances.append({ + 'policy': self._policy, + 'ResourceId': resource.get('InstanceId'), + 'StoppedDays': days, + 'StoppedDate': str(resource.get('UsageOperationUpdateTime')), + 'Name': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Name'), + 'User': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='User'), + 'LaunchTime': str(resource.get('LaunchTime')), + 'Policy': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Policy')}) if self._dry_run == "no": for instance_id, tags in stopped_instance_tags.items(): if self._get_policy_value(tags=tags) not in ('NOTDELETE', 'SKIP'): diff --git a/cloud_governance/policy/aws/empty_roles.py b/cloud_governance/policy/aws/empty_roles.py index 2482b81d..72d31fbb 100644 --- a/cloud_governance/policy/aws/empty_roles.py +++ b/cloud_governance/policy/aws/empty_roles.py @@ -12,7 +12,7 @@ def __init__(self): def run(self): """ - This method return all empty roles, delete if dry_run no + This method returns all empty roles, delete if dry_run no @return: """ return self.__delete_empty_roles() @@ -40,7 +40,12 @@ def __delete_empty_roles(self): empty_days = self._get_resource_last_used_days(tags=tags) empty_role = self._check_resource_and_delete(resource_name='IAM Role', resource_id='RoleName', resource_type='CreateRole', resource=get_role, empty_days=empty_days, days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags) if empty_role: - zombie_roles.append([empty_role.get('RoleName'), self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_policy_value(tags=tags), empty_days]) + zombie_roles.append({ + 'ResourceId': empty_role.get('RoleName'), + 'Name': empty_role.get('RoleName'), + 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), + 'Skip': self._get_policy_value(tags=tags), + 'Days': empty_days}) else: empty_days = 0 self._update_resource_tags(resource_id=role_name, tags=tags, left_out_days=empty_days, resource_left_out=role_empty) diff --git a/cloud_governance/policy/aws/ip_unattached.py b/cloud_governance/policy/aws/ip_unattached.py index c26ca878..8cefd079 
100644 --- a/cloud_governance/policy/aws/ip_unattached.py +++ b/cloud_governance/policy/aws/ip_unattached.py @@ -13,7 +13,7 @@ def __init__(self): def run(self): """ - This method return zombie elastic_ip's and delete if dry_run no + This method returns zombie elastic_ip's and delete if dry_run no @return: """ addresses = self._ec2_operations.get_elastic_ips() @@ -40,9 +40,12 @@ def run(self): days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags, extra_purse=eip_cost, delta_cost=delta_cost) if zombie_eip: - zombie_addresses.append([address.get('AllocationId'), self._get_tag_name_from_tags(tags=tags), - self._get_tag_name_from_tags(tags=tags, tag_name='User'), address.get('PublicIp'), - self._get_policy_value(tags=tags), unused_days]) + zombie_addresses.append({'ResourceId': address.get('AllocationId'), + 'Name': self._get_tag_name_from_tags(tags=tags), + 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), + 'PublicIp': address.get('PublicIp'), + 'Skip': self._get_policy_value(tags=tags), + 'Days': unused_days}) else: unused_days = 0 self._update_resource_tags(resource_id=address.get('AllocationId'), tags=tags, left_out_days=unused_days, resource_left_out=ip_no_used) diff --git a/cloud_governance/policy/aws/monthly_report.py b/cloud_governance/policy/aws/monthly_report.py index 89a03530..d2b3b203 100644 --- a/cloud_governance/policy/aws/monthly_report.py +++ b/cloud_governance/policy/aws/monthly_report.py @@ -26,7 +26,7 @@ def __init__(self): def policy_description(self, policy_name: str): """ - This method return the policy description + This method returns the policy description @param policy_name: @return: """ @@ -35,7 +35,7 @@ def policy_description(self, policy_name: str): 'ec2_idle': 'stops the idle instances in the last 7 days. ( CPU < 5%, Network < 5k )', 'ebs_unattached': 'Delete unattached EBS volumes, where the unused days are calculated by the last DeattachedTime', 'ip_unattached': 'Delete all the elastic_ips that are unused', - 'nat_gateway_unused': ' Delete all unused nat gateways', + 'unused_nat_gateway': ' Delete all unused nat gateways', 'zombie_snapshots': 'Delete all the snapshots which the AMI does not use', 's3_inactive': 'Delete the empty buckets which don’t have any content.', 'empty_roles': 'Delete the empty role which does\'t have any policies', @@ -82,7 +82,7 @@ def send_monthly_report(self): def row_span(self, cols: int): """ - This method return the table data with colspan + This method returns the table data with colspan @param cols: @return: """ diff --git a/cloud_governance/policy/aws/nat_gateway_unused.py b/cloud_governance/policy/aws/nat_gateway_unused.py deleted file mode 100644 index 43c5f112..00000000 --- a/cloud_governance/policy/aws/nat_gateway_unused.py +++ /dev/null @@ -1,51 +0,0 @@ - -from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy - - -class NatGatewayUnused(NonClusterZombiePolicy): - """ - This class sends an alert mail for zombie Nat gateways ( based on vpc routes ) - to the user after 4 days and delete after 7 days. 
- """ - - def __init__(self): - super().__init__() - - def __check_nat_gateway_in_routes(self, nat_gateway_id: str): - route_tables = self._ec2_client.describe_route_tables()['RouteTables'] - nat_gateway_found = False - for route_table in route_tables: - for route in route_table.get('Routes'): - if route.get('NatGatewayId') == nat_gateway_id: - nat_gateway_found = True - return nat_gateway_found - - def run(self): - """ - This method return zombie NatGateways, delete if dry_run no - @return: - """ - nat_gateways = self._ec2_operations.get_nat_gateways() - nat_gateway_unused_data = [] - for nat_gateway in nat_gateways: - if self._get_policy_value(tags=nat_gateway.get('Tags', [])) not in ('NOTDELETE', 'SKIP'): - nat_gateway_id = nat_gateway.get('NatGatewayId') - tags = nat_gateway.get('Tags') - gateway_unused = False - if not self._check_cluster_tag(tags=tags): - if nat_gateway.get('State') == 'available': - if not self.__check_nat_gateway_in_routes(nat_gateway_id=nat_gateway_id): - gateway_unused = True - unused_days = self._get_resource_last_used_days(tags=tags) - zombie_nat_gateway = self._check_resource_and_delete(resource_name='NatGateway', - resource_id='NatGatewayId', - resource_type='CreateNatGateway', - resource=nat_gateway, - empty_days=unused_days, - days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags) - if zombie_nat_gateway: - nat_gateway_unused_data.append([nat_gateway_id, self._get_tag_name_from_tags(tags=tags, tag_name='User'), zombie_nat_gateway.get('VpcId'), self._get_policy_value(tags=tags), unused_days]) - else: - unused_days = 0 - self._update_resource_tags(resource_id=nat_gateway_id, tags=tags, left_out_days=unused_days, resource_left_out=gateway_unused) - return nat_gateway_unused_data diff --git a/cloud_governance/policy/aws/s3_inactive.py b/cloud_governance/policy/aws/s3_inactive.py index 118a36a6..b0ae5eb0 100644 --- a/cloud_governance/policy/aws/s3_inactive.py +++ b/cloud_governance/policy/aws/s3_inactive.py @@ -15,7 +15,7 @@ def __init__(self): def run(self): """ - This method return all Empty buckets and delete if dry_run no + This method returns all Empty buckets and delete if dry_run no @return: """ return self.__delete_s3_inactive() @@ -49,7 +49,7 @@ def __delete_s3_inactive(self): self._cloudtrail.set_cloudtrail(region_name=region) empty_bucket = self._check_resource_and_delete(resource_name='S3 Bucket', resource_id='Name', resource_type='CreateBucket', resource=bucket, empty_days=empty_days, days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags) if empty_bucket: - empty_buckets.append([bucket.get('Name'), self._get_tag_name_from_tags(tags=tags, tag_name='User'), str(bucket.get('CreationDate')), str(empty_days), self._get_policy_value(tags=tags)]) + empty_buckets.append({'ResourceId': bucket.get('Name'), 'Name': bucket.get('Name'), 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), 'Date': str(bucket.get('CreationDate')), 'Days': str(empty_days), 'Skip': self._get_policy_value(tags=tags)}) else: empty_days = 0 self._update_resource_tags(resource_id=bucket_name, tags=tags, left_out_days=empty_days, resource_left_out=bucket_empty) diff --git a/cloud_governance/policy/aws/skipped_resources.py b/cloud_governance/policy/aws/skipped_resources.py index 60c22432..8bcfdd24 100644 --- a/cloud_governance/policy/aws/skipped_resources.py +++ b/cloud_governance/policy/aws/skipped_resources.py @@ -28,7 +28,7 @@ def get_volume_type_prices(self): def get_resources(self, resource_name: str): """ - This method return resource data based on 
resource name + This method returns resource data based on resource name @param resource_name: @return: """ @@ -45,7 +45,7 @@ def get_ebs_cost(self, volume_id: str): """ - This method return the size of the ebs_volume + This method returns the size of the ebs_volume @param volume_id: @return: """ @@ -57,7 +57,7 @@ def get_instance_volume_size(self, resource: dict): """ - This method return size of the attached volumes of the instance + This method returns size of the attached volumes of the instance @param resource: @return: """ @@ -106,7 +106,7 @@ def get_not_delete_resources(self): def run(self): """ - This method return all tag "Not_Delete" or "skip" resources + This method returns all tag "Not_Delete" or "skip" resources @return: """ resources_data = self.get_not_delete_resources() diff --git a/cloud_governance/policy/aws/spot_savings_analysis.py b/cloud_governance/policy/aws/spot_savings_analysis.py new file mode 100644 index 00000000..5c0d7e40 --- /dev/null +++ b/cloud_governance/policy/aws/spot_savings_analysis.py @@ -0,0 +1,116 @@ + +from datetime import datetime + +import typeguard + +from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations +from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables + + +class SpotSavingsAnalysis: + """ + This class contains the spot savings analysis reports from the Athena query + that are gathered from the AWS Cost and Usage Reports. + To get reports from Athena: + 1. Enable the Cost and Usage Reports with Athena integration support + 2.
Create a Database and table of CUR + """ + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__default_round_digits = self.__environment_variables_dict.get('DEFAULT_ROUND_DIGITS') + self.__es_index = self.__environment_variables_dict.get('es_index') + self.__database_name = self.__environment_variables_dict.get('ATHENA_DATABASE_NAME') + self.__table_name = self.__environment_variables_dict.get('ATHENA_TABLE_NAME') + self.__es_operations = ElasticSearchOperations() + + def __get_prepared_query(self): + """ + This method prepare the query + :return: + """ + current_date = datetime.utcnow() + year = current_date.year + current_month = current_date.month + previous_month = current_month - 1 if current_month - 1 != 0 else 12 + query = f""" + SELECT + date_format(line_item_usage_start_date, '%Y-%m-%d') as CurrentDate, + date_format(bill_billing_period_start_date, '%Y-%m-%d') as MonthStartDate, + line_item_usage_account_id as AccountId, + line_item_product_code as ProductCode, + product_region as Region, + product_instance_type as InstanceType, + cost_category_cost_center as CostCenter, + cost_category_o_us as CostCategory, + cost_category_organization as RHOrg, + ROUND(SUM(discount_total_discount), 3) as TotalDiscount, + ROUND(SUM(line_item_usage_amount), {self.__default_round_digits}) as UsageAmount, + ROUND(SUM(line_item_unblended_cost + discount_total_discount), {self.__default_round_digits}) as UnblendedCost, + ROUND(SUM(pricing_public_on_demand_cost), {self.__default_round_digits}) as OnDemand, + ROUND(SUM(pricing_public_on_demand_cost - line_item_unblended_cost), {self.__default_round_digits}) as SpotSavings + FROM "{self.__database_name}"."{self.__table_name}" + WHERE "product_product_name" = 'Amazon Elastic Compute Cloud' + AND "line_item_resource_id" LIKE 'i-%' + AND "line_item_operation" LIKE 'RunInstance%' + AND "product_marketoption" = 'Spot' + AND month(bill_billing_period_start_date) in ({previous_month}, {current_month}) + AND year(bill_billing_period_start_date) = {year} + AND pricing_public_on_demand_cost <> 0 + GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9 ORDER BY MonthStartDate desc + """ + return query + + def __get_query_results(self): + """ + This method get queries data + :return: + """ + result = {} + query_string = self.__get_prepared_query() + if query_string: + athena_operations = PyAthenaOperations() + result = athena_operations.execute_query(query_string) + else: + logger.debug(f"query string is not provided, exit without query execution") + return result + + @typeguard.typechecked + def __get_data_to_upload_to_es(self, athena_data_dictionary: list): + """ + This method returns ready upload dict to upload to elasticsearch + :return: + """ + for row_dict in athena_data_dictionary: + month_start_date = row_dict.get('MonthStartDate') + month_name_year = datetime.strftime(datetime.strptime(month_start_date, '%Y-%m-%d'), '%Y %b') + row_dict['Month'] = month_name_year + row_dict['filter_date'] = f'{month_start_date}-{month_name_year.split()[-1]}' + row_dict['AccountIdCostCenter'] = f'{row_dict.get("AccountId")}-{row_dict.get("CostCenter")}' + row_dict['index_id'] = f'{row_dict.get("CurrentDate")}-' \ + f'{row_dict.get("AccountId")}-' \ + f'{row_dict.get("Region")}-{row_dict.get("InstanceType")}' + row_dict['AWSCostCenter'] = f'AWS-{row_dict.get("CostCenter")}' + + @logger_time_stamp + def __collect_reports_and_upload_es(self): + """ + This method collects the data and uploads to elastic search + :return: + """ + 
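The month window built by `__get_prepared_query` above always pairs the current billing month with the previous one; a minimal sketch of the wrap-around arithmetic, assuming a hypothetical January run date (illustrative only, not part of the patch):

```python
from datetime import datetime

# The `!= 0` guard wraps January back to December; note that the query's year
# filter keeps the *current* year, so a January run pairs month 12 with the
# new year rather than the old one.
current_date = datetime(2023, 1, 15)  # hypothetical run date
current_month = current_date.month
previous_month = current_month - 1 if current_month - 1 != 0 else 12
assert (previous_month, current_month) == (12, 1)
```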
query_result = self.__get_query_results() + if query_result: + self.__get_data_to_upload_to_es(athena_data_dictionary=query_result) + if query_result: + self.__es_operations.upload_data_in_bulk(data_items=query_result, id='index_id', index=self.__es_index) + + @logger_time_stamp + def run(self): + """ + This is the starting point of the methods + :return: + """ + self.__collect_reports_and_upload_es() diff --git a/cloud_governance/policy/aws/unused_nat_gateway.py b/cloud_governance/policy/aws/unused_nat_gateway.py new file mode 100644 index 00000000..cdbb214b --- /dev/null +++ b/cloud_governance/policy/aws/unused_nat_gateway.py @@ -0,0 +1,92 @@ +import datetime + + +from cloud_governance.common.clouds.aws.cloudwatch.cloudwatch_operations import CloudWatchOperations +from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy + + +class UnusedNatGateway(NonClusterZombiePolicy): + """ + This class sends an alert mail for zombie Nat gateways ( based on vpc routes ) + to the user after 4 days and deletes them after 7 days. + """ + + NAMESPACE = 'AWS/NATGateway' + UNUSED_DAYS = 1 + + def __init__(self): + super().__init__() + self._cloudwatch = CloudWatchOperations(region=self._region) + + def __check_cloud_watch_logs(self, resource_id: str, days: int = UNUSED_DAYS): + """ + This method returns whether the NatGateway was used in the last input days + :param resource_id: + :param days: + :return: + """ + if days == 0: + days = 1 + end_time = datetime.datetime.utcnow() + start_time = end_time - datetime.timedelta(days=days) + response = self._cloudwatch.get_metric_data(start_time=start_time, end_time=end_time, resource_id=resource_id, + resource_type='NatGatewayId', namespace=self.NAMESPACE, + metric_names={'ActiveConnectionCount': 'Count'}, + statistic='Average')['MetricDataResults'][0] + for value in response.get('Values'): + if value > 0: + return False + return True + + def __check_nat_gateway_in_routes(self, nat_gateway_id: str): + """ + This method checks whether the nat gateway is present in the routes.
+ :param nat_gateway_id: + :return: + """ + route_tables = self._ec2_client.describe_route_tables()['RouteTables'] + nat_gateway_found = False + for route_table in route_tables: + for route in route_table.get('Routes'): + if route.get('NatGatewayId') == nat_gateway_id: + nat_gateway_found = True + return nat_gateway_found + + def run(self): + """ + This method returns zombie NatGateways, delete if dry_run no + @return: + """ + nat_gateways = self._ec2_operations.get_nat_gateways() + nat_gateway_unused_data = [] + for nat_gateway in nat_gateways: + if self._get_policy_value(tags=nat_gateway.get('Tags', [])) not in ('NOTDELETE', 'SKIP'): + nat_gateway_id = nat_gateway.get('NatGatewayId') + tags = nat_gateway.get('Tags') + gateway_unused = False + last_used_days = int(self._ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='LastUsedDay', default_value=1)) + if not self._check_cluster_tag(tags=tags): + if nat_gateway.get('State') == 'available': + if not self.__check_nat_gateway_in_routes(nat_gateway_id=nat_gateway_id) or self.__check_cloud_watch_logs(days=last_used_days, resource_id=nat_gateway_id): + gateway_unused = True + unused_days = self._get_resource_last_used_days(tags=tags) + zombie_nat_gateway = self._check_resource_and_delete(resource_name='NatGateway', + resource_id='NatGatewayId', + resource_type='CreateNatGateway', + resource=nat_gateway, + empty_days=unused_days, + days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, + tags=tags) + if zombie_nat_gateway: + nat_gateway_unused_data.append( + {'ResourceId': nat_gateway_id, + 'Name': self._get_tag_name_from_tags(tags=tags, tag_name='Name'), + 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), + 'VpcId': zombie_nat_gateway.get('VpcId'), + 'Skip': self._get_policy_value(tags=tags), + 'Days': unused_days, 'Policy': self._policy}) + else: + unused_days = 0 + self._update_resource_tags(resource_id=nat_gateway_id, tags=tags, left_out_days=unused_days, + resource_left_out=gateway_unused) + return nat_gateway_unused_data diff --git a/cloud_governance/policy/aws/zombie_cluster_resource.py b/cloud_governance/policy/aws/zombie_cluster_resource.py index 00743617..203e9fac 100644 --- a/cloud_governance/policy/aws/zombie_cluster_resource.py +++ b/cloud_governance/policy/aws/zombie_cluster_resource.py @@ -38,7 +38,7 @@ def __init__(self, cluster_prefix: str = None, delete: bool = False, region: str def all_cluster_instance(self): """ - This method return list of cluster's instance tag name that contains openshift tag prefix from all regions + This method returns list of cluster's instance tag name that contains openshift tag prefix from all regions :return: list of cluster's instance tag name """ instances_list = [] @@ -63,7 +63,7 @@ def all_cluster_instance(self): def _cluster_instance(self): """ - This method return list of cluster's instance tag name that contains openshift tag prefix + This method returns list of cluster's instance tag name that contains openshift tag prefix :return: list of cluster's instance tag name """ instances_list = [] @@ -85,7 +85,7 @@ def _cluster_instance(self): def __get_cluster_resources(self, resources_list: list, input_resource_id: str, tags: str = 'Tags'): """ - This method return all cluster resources keys that start with cluster prefix + This method returns all cluster resources keys that start with cluster prefix :param resources_list: :param tags: :return: dictionary of the resources key and id @@ -181,7 +181,7 @@ def __get_all_zombie_resources(self, exist_resources: dict): def 
zombie_cluster_volume(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's volume according to cluster tag name and cluster name data + This method returns list of cluster's volume according to cluster tag name and cluster name data delete only available resource that related to cluster """ available_volumes = [] @@ -200,14 +200,14 @@ def zombie_cluster_volume(self, vpc_id: str = '', cluster_tag_vpc: str = ''): if delete_cluster_resource and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource_id=zombie, resource='ec2_volume') else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource_id=zombie, resource='ec2_volume') return zombies, cluster_left_out_days def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's ami according to cluster tag name and cluster name data + This method returns list of cluster's ami according to cluster tag name and cluster name data """ images_data = self.ec2_operations.get_images() exist_ami = self.__get_cluster_resources(resources_list=images_data, input_resource_id='ImageId') @@ -225,7 +225,7 @@ def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''): self.ec2_client.deregister_image(ImageId=zombie) logger.info(f'deregister_image: {zombie}') else: - if self._force_delete: + if self._force_delete and self.delete: self.ec2_client.deregister_image(ImageId=zombie) logger.info(f'deregister_image: {zombie}') except Exception as err: @@ -234,7 +234,7 @@ def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''): def zombie_cluster_snapshot(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's snapshot according to cluster tag name and cluster name data + This method returns list of cluster's snapshot according to cluster tag name and cluster name data """ snapshots_data = self.ec2_operations.get_snapshots() exist_snapshot = self.__get_cluster_resources(resources_list=snapshots_data, input_resource_id='SnapshotId') @@ -247,7 +247,7 @@ def zombie_cluster_snapshot(self, vpc_id: str = '', cluster_tag_vpc: str = ''): if delete_cluster_resource and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='ebs_snapshots', resource_id=zombie) else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='ebs_snapshots', resource_id=zombie) return zombies, cluster_left_out_days @@ -318,7 +318,7 @@ def __get_zombies_by_vpc_id(self, vpc_id: str, resources: list, output_tag: str, def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of zombie cluster's security groups compare to existing instances and cluster name data + This method returns list of zombie cluster's security groups compare to existing instances and cluster name data :return: list of zombie cluster's security groups """ security_groups = self.ec2_operations.get_security_groups() @@ -338,7 +338,7 @@ def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str = for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource('security_group', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource('security_group', 
resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) @@ -346,7 +346,7 @@ def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str = def zombie_cluster_elastic_ip(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of zombie cluster's elastic ip according to existing instances and cluster name data + This method returns list of zombie cluster's elastic ip according to existing instances and cluster name data """ exist_elastic_ip_association = [] exist_elastic_ip_allocation = [] @@ -377,14 +377,14 @@ def zombie_cluster_elastic_ip(self, vpc_id: str = '', cluster_tag_vpc: str = '') if delete_cluster_resource and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='elastic_ip', resource_id=zombie, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='elastic_ip', resource_id=zombie, cluster_tag=cluster_tag) zombies = {**zombies_all} return zombies, cluster_left_out_days def zombie_cluster_network_interface(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of zombie cluster's network interface according to existing instances and cluster name data + This method returns list of zombie cluster's network interface according to existing instances and cluster name data """ network_interfaces_data = self.ec2_operations.get_network_interface() exist_network_interface = self.__get_cluster_resources(resources_list=network_interfaces_data, input_resource_id='NetworkInterfaceId', tags='TagSet') @@ -406,14 +406,14 @@ def zombie_cluster_network_interface(self, vpc_id: str = '', cluster_tag_vpc: st for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='network_interface', resource_id=zombie_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='network_interface', resource_id=zombie_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_load_balancer(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's load balancer according to cluster vpc and cluster name data + This method returns list of cluster's load balancer according to cluster vpc and cluster name data """ exist_load_balancer = {} @@ -441,13 +441,13 @@ def zombie_cluster_load_balancer(self, vpc_id: str = '', cluster_tag_vpc: str = if delete_cluster_resource and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer', resource_id=zombie, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer', resource_id=zombie, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_load_balancer_v2(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's load balancer according to cluster vpc and cluster name data + This method returns list of cluster's load balancer according to cluster vpc and cluster name data """ exist_load_balancer = {} load_balancers_data = self.ec2_operations.get_load_balancers_v2() @@ -474,13 +474,13 @@ def zombie_cluster_load_balancer_v2(self, vpc_id: str = '', cluster_tag_vpc: str if delete_cluster_resource and self.delete: 
self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer_v2', resource_id=zombie, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer_v2', resource_id=zombie, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def __get_all_exist_vpcs(self): """ - This method return all exist vpc ids (for supporting Network ACL - missing OpenShift tags) + This method returns all exist vpc ids (for supporting Network ACL - missing OpenShift tags) :return: """ vpcs_data = self.ec2_operations.get_vpcs() @@ -491,7 +491,7 @@ def __get_all_exist_vpcs(self): def zombie_cluster_vpc(self): """ - This method return list of cluster's vpc according to cluster tag name and cluster name data + This method returns list of cluster's vpc according to cluster tag name and cluster name data """ vpcs_data = self.ec2_operations.get_vpcs() exist_vpc = self.__get_cluster_resources(resources_list=vpcs_data, input_resource_id='VpcId') @@ -510,14 +510,14 @@ def zombie_cluster_vpc(self): if delete_cluster_resource and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='vpc', resource_id=zombie, pending_resources=delete_dict, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_ec2_resource.delete_zombie_resource(resource='vpc', resource_id=zombie, pending_resources=delete_dict, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_subnet(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's subnet according to cluster tag name and cluster name data + This method returns list of cluster's subnet according to cluster tag name and cluster name data """ subnets_data = self.ec2_operations.get_subnets() exist_subnet = self.__get_cluster_resources(resources_list=subnets_data, input_resource_id='SubnetId') @@ -537,14 +537,14 @@ def zombie_cluster_subnet(self, vpc_id: str = '', cluster_tag_vpc: str = ''): for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='subnet', resource_id=zombie_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='subnet', resource_id=zombie_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_route_table(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's route table according to cluster tag name and cluster name data + This method returns list of cluster's route table according to cluster tag name and cluster name data """ route_tables_data = self.ec2_operations.get_route_tables() exist_route_table = self.__get_cluster_resources(resources_list=route_tables_data, input_resource_id='RouteTableId') @@ -564,14 +564,14 @@ def zombie_cluster_route_table(self, vpc_id: str = '', cluster_tag_vpc: str = '' for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='route_table', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='route_table', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_internet_gateway(self, vpc_id: str = '', 
cluster_tag_vpc: str = ''): """ - This method return list of cluster's route table internet gateway according to cluster tag name and cluster name data + This method returns list of cluster's route table internet gateway according to cluster tag name and cluster name data """ internet_gateways_data = self.ec2_operations.get_internet_gateways() exist_internet_gateway = self.__get_cluster_resources(resources_list=internet_gateways_data, input_resource_id='InternetGatewayId') @@ -593,7 +593,7 @@ def zombie_cluster_internet_gateway(self, vpc_id: str = '', cluster_tag_vpc: str for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='internet_gateway', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='internet_gateway', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) @@ -601,7 +601,7 @@ def zombie_cluster_internet_gateway(self, vpc_id: str = '', cluster_tag_vpc: str def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's dhcp option according to cluster tag name and cluster name data + This method returns list of cluster's dhcp option according to cluster tag name and cluster name data """ dhcp_options_data = self.ec2_operations.get_dhcp_options() exist_dhcp_option = self.__get_cluster_resources(resources_list=dhcp_options_data, input_resource_id='DhcpOptionsId') @@ -619,7 +619,7 @@ def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = '' else: self.delete_ec2_resource.delete_zombie_resource(resource='dhcp_options', resource_id=zombie) else: - if self._force_delete: + if self._force_delete and self.delete: if vpc_id: self.delete_ec2_resource.delete_zombie_resource(resource='dhcp_options', resource_id=zombie, vpc_id=vpc_id) else: @@ -628,7 +628,7 @@ def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = '' def zombie_cluster_vpc_endpoint(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of cluster's vpc endpoint according to cluster tag name and cluster name data + This method returns list of cluster's vpc endpoint according to cluster tag name and cluster name data """ vpc_endpoints_data = self.ec2_operations.get_vpce() exist_vpc_endpoint = self.__get_cluster_resources(resources_list=vpc_endpoints_data, input_resource_id='VpcEndpointId') @@ -650,14 +650,14 @@ def zombie_cluster_vpc_endpoint(self, vpc_id: str = '', cluster_tag_vpc: str = ' for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='vpc_endpoints', resource_id=zombie_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='vpc_endpoints', resource_id=zombie_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_nat_gateway(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of zombie cluster's nat gateway according to cluster tag name and cluster name data + This method returns list of zombie cluster's nat gateway according to cluster tag name and cluster name data """ nat_gateways_data = self.ec2_operations.get_nat_gateways() exist_nat_gateway = self.__get_cluster_resources(resources_list=nat_gateways_data, input_resource_id='NatGatewayId') @@ -676,14 
+676,14 @@ def zombie_cluster_nat_gateway(self, vpc_id: str = '', cluster_tag_vpc: str = '' for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='nat_gateways', resource_id=zombie_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='nat_gateways', resource_id=zombie_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_network_acl(self, vpc_id: str = '', cluster_tag_vpc: str = ''): """ - This method return list of zombie cluster's network acl according to existing vpc id and cluster name data + This method returns list of zombie cluster's network acl according to existing vpc id and cluster name data """ exist_network_acl = {} network_acls_data = self.ec2_operations.get_nacls() @@ -713,14 +713,14 @@ def zombie_cluster_network_acl(self, vpc_id: str = '', cluster_tag_vpc: str = '' for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='network_acl', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) else: - if self._force_delete: + if self._force_delete and self.delete: for zombie_id in zombie_ids: self.delete_ec2_resource.delete_zombie_resource(resource='network_acl', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag) return zombies, cluster_left_out_days def zombie_cluster_role(self): """ - This method return list of cluster's role in all regions according to cluster name and cluster name data + This method returns list of cluster's role in all regions according to cluster name and cluster name data * Role is a global resource, need to scan for live cluster in all regions """ exist_role_name_tag = {} @@ -751,13 +751,13 @@ def zombie_cluster_role(self): if delete_cluster_resource and self.delete: self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_role') else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_role') return zombies, cluster_left_out_days def zombie_cluster_user(self): """ - This method return list of cluster's user according to cluster name and cluster name data + This method returns list of cluster's user according to cluster name and cluster name data * User is a global resource, need to scan for live cluster in all regions """ exist_user_name_tag = {} @@ -785,14 +785,14 @@ def zombie_cluster_user(self): if delete_cluster_resource and self.delete: self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_user') else: - if self._force_delete: + if self._force_delete and self.delete: self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_user') return zombies, cluster_left_out_days def zombie_cluster_s3_bucket(self, cluster_stamp: str = 'image-registry'): """ - This method return list of cluster's s3 bucket according to cluster name and cluster name data + This method returns list of cluster's s3 bucket according to cluster name and cluster name data * S3 is a global resource, need to scan for live cluster in all regions """ exist_bucket_name_tag = {} @@ -824,7 +824,7 @@ def zombie_cluster_s3_bucket(self, cluster_stamp: str = 'image-registry'): if delete_cluster_resource and self.delete: self.delete_s3_resource.delete_zombie_s3_resource(resource_type='s3_bucket', resource_id=zombie) else: - if self._force_delete: 
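Every hunk in this file makes the same one-line change: `if self._force_delete:` becomes `if self._force_delete and self.delete:`, so a force delete is honoured only while the top-level delete flag is also enabled. A minimal sketch of the resulting decision table (the helper name is hypothetical):

def should_delete(delete: bool, force_delete: bool, matched_cluster: bool) -> bool:
    # Normal path: the resource matched the zombie-cluster criteria.
    if matched_cluster and delete:
        return True
    # Force path: after this change force_delete alone is no longer enough;
    # the global delete flag must be set as well.
    return force_delete and delete

assert should_delete(delete=False, force_delete=True, matched_cluster=False) is False  # dry run stays safe
assert should_delete(delete=True, force_delete=True, matched_cluster=False) is True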
+ if self._force_delete and self.delete: self.delete_s3_resource.delete_zombie_s3_resource(resource_type='s3_bucket', resource_id=zombie) return zombies, cluster_left_out_days diff --git a/cloud_governance/policy/aws/zombie_snapshots.py b/cloud_governance/policy/aws/zombie_snapshots.py index 7a8ca2d7..58b15873 100644 --- a/cloud_governance/policy/aws/zombie_snapshots.py +++ b/cloud_governance/policy/aws/zombie_snapshots.py @@ -25,7 +25,7 @@ def _get_image_ids_from_description(self, snapshot_description: str): def run(self): """ - This method return all the zombie snapshots, delete if dry_run no + This method returns all the zombie snapshots, delete if dry_run no @return: """ snapshots = self._ec2_operations.get_snapshots() @@ -51,12 +51,13 @@ def run(self): days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags) if zombie_snapshot: - zombie_snapshots.append([snapshot.get('SnapshotId'), - self._get_tag_name_from_tags(tags=tags), - self._get_tag_name_from_tags(tags=tags, tag_name='User'), - f'{str(snapshot.get("VolumeSize"))}Gb', - self._get_policy_value(tags=snapshot.get('Tags')), str(unused_days) - ]) + zombie_snapshots.append({'ResourceId': snapshot.get('SnapshotId'), + 'Name': self._get_tag_name_from_tags(tags=tags), + 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), + 'Size': f'{str(snapshot.get("VolumeSize"))}Gb', + 'Skip': self._get_policy_value(tags=snapshot.get('Tags')), + 'Days': str(unused_days) + }) else: unused_days = 0 self._update_resource_tags(resource_id=snapshot_id, tags=tags, left_out_days=unused_days, diff --git a/cloud_governance/policy/azure/cost_billing_reports.py b/cloud_governance/policy/azure/cost_billing_reports.py index 2a80a74a..ad7c11fa 100644 --- a/cloud_governance/policy/azure/cost_billing_reports.py +++ b/cloud_governance/policy/azure/cost_billing_reports.py @@ -17,9 +17,6 @@ class CostBillingReports: This class is responsible for generation cost billing report for Budget, Actual, Forecast """ - COST_CENTER_OWNER = 'Shai' - COST_CENTER_OWNER_OTHERS = 'Others' - def __init__(self): self.__environment_variables_dict = environment_variables.environment_variables_dict self.__total_account = self.__environment_variables_dict.get('TOTAL_ACCOUNTS', '') @@ -29,7 +26,7 @@ def __init__(self): self.gdrive_operations = GoogleDriveOperations() self.__gsheet_id = self.__environment_variables_dict.get('SPREADSHEET_ID') self.update_to_gsheet = UploadToGsheet() - self.__cost_center, self.__allocated_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.azure_operations.subscription_id, dir_path='/tmp') + self.__cost_center, self.__allocated_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.azure_operations.subscription_id, dir_path='/tmp') self.__common_data = self.get_common_data() def get_common_data(self): @@ -44,7 +41,7 @@ def get_common_data(self): upload_data['AllocatedBudget'] = 0 upload_data['CostCenter'] = int(self.__cost_center) upload_data['CloudName'] = self.azure_operations.cloud_name - upload_data['Owner'] = self.COST_CENTER_OWNER_OTHERS + upload_data['Owner'] = self.__owner return upload_data @logger_time_stamp @@ -66,12 +63,12 @@ def get_data_from_costs(self, cost_data_rows: list, cost_data_columns: list, cos if cost_center > 0: common_data['CostCenter'] = cost_center if subscription_id: - cost_center, allocated_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=subscription_id, dir_path='/tmp') + cost_center, 
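The zombie_snapshots.py hunk above replaces the positional result list with a keyed record, which is what lets the aggregated-alert policy added further down look fields up by name instead of by index. One such record might look like this (field names from the hunk, values invented):

zombie_snapshot_record = {
    'ResourceId': 'snap-0123456789abcdef0',   # SnapshotId
    'Name': 'ocp-test-cluster-master-0',      # Name tag, if any
    'User': 'NA',                             # User tag, if any
    'Size': '10Gb',                           # f'{VolumeSize}Gb'
    'Skip': 'NA',                             # policy/skip tag value
    'Days': '8',                              # unused days, stringified
}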
allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=subscription_id, dir_path='/tmp') if cost_center: common_data['CostCenter'] = int(cost_center) - common_data['Owner'] = self.COST_CENTER_OWNER + common_data['Owner'] = owner else: - common_data['Owner'] = self.COST_CENTER_OWNER_OTHERS + common_data['Owner'] = owner else: allocated_budget, years = self.__allocated_budget, self.__years for index, item in enumerate(cost_data_rows): @@ -87,12 +84,12 @@ def get_data_from_costs(self, cost_data_rows: list, cost_data_columns: list, cos common_data['Account'] = item[key] elif column.get('name') == 'SubscriptionId': common_data['AccountId'] = item[key] - cost_center, allocated_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=item[key], dir_path='/tmp') + cost_center, allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=item[key], dir_path='/tmp') if cost_center: common_data['CostCenter'] = int(cost_center) - common_data['Owner'] = self.COST_CENTER_OWNER + common_data['Owner'] = owner else: - common_data['Owner'] = self.COST_CENTER_OWNER_OTHERS + common_data['Owner'] = owner else: if column.get('type') == 'Datetime': start_date = item[key].split('T')[0] diff --git a/cloud_governance/policy/common_policies/__init__.py b/cloud_governance/policy/common_policies/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/policy/common_policies/send_aggregated_alerts.py b/cloud_governance/policy/common_policies/send_aggregated_alerts.py new file mode 100644 index 00000000..741a7444 --- /dev/null +++ b/cloud_governance/policy/common_policies/send_aggregated_alerts.py @@ -0,0 +1,159 @@ +import json +import logging +import os +import tempfile +from datetime import date, datetime, timedelta + +import typeguard +from botocore.exceptions import ClientError + +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations +from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations +from cloud_governance.common.jira.jira import logger +from cloud_governance.common.logger.init_logger import handler +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.common.mails.mail_message import MailMessage +from cloud_governance.common.mails.postfix import Postfix +from cloud_governance.main.environment_variables import environment_variables + + +class SendAggregatedAlerts: + """ + This class send alerts to users which conditions are not satisfied by the policies + """ + + FILE_NAME = 'resources.json' + GLOBAL_REGION = 'us-east-1' + TODAY_DATE = str(date.today()).replace('-', '/') + + def __init__(self): + self.__environment_variables = environment_variables.environment_variables_dict + self.__bucket_name = self.__environment_variables.get('BUCKET_NAME') + self.__bucket_key = self.__environment_variables.get('BUCKET_KEY') + self.__policies = self.__environment_variables.get('POLICIES_TO_ALERT') + self.__s3_operations = S3Operations(region_name='us-east-2', bucket=self.__bucket_name, logs_bucket_key=self.__bucket_key) + self.__active_regions = EC2Operations().get_active_regions() + self.__kerberos_users = self.__get_kerberos_users_for_iam_users() + self.__global_region_policies = ['s3-inactive', 'empty-roles'] + self.__mail_alert_days = self.__environment_variables.get('MAIL_ALERT_DAYS') + self.__policy_action_days = self.__environment_variables.get('POLICY_ACTIONS_DAYS') + self.__mail_message = 
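SendAggregatedAlerts reads each policy's last scan out of the governance bucket; the key shape it expects is assembled in __get_policy_data_in_bucket below. An illustration with placeholder values:

# Placeholder values; only the key shape comes from the class below.
bucket_key, region, policy = 'logs/cloud-governance', 'us-east-1', 'zombie-snapshots'
today_date = '2023/08/21'             # TODAY_DATE = str(date.today()).replace('-', '/')
key_prefix = f'{bucket_key}/{region}/{policy}/{today_date}'
# get_last_objects(key_prefix) resolves the newest run under that prefix, and
# resources.json inside it carries the policy's latest results.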
MailMessage() + self.__postfix = Postfix() + + @logger_time_stamp + def __get_kerberos_users_for_iam_users(self): + """ + This method returns the users which IAM users are not kerberos username + :return: + """ + responses = {} + users = self.__environment_variables.get('KERBEROS_USERS') + for iam_user, kerberos_user in users.items(): + responses[iam_user.lower()] = kerberos_user.lower() + return responses + + def __get_users_agg_result(self, policy_result: list, agg_users_result: dict, policy_name: str, region: str): + """ + This method returns the aggregated users resources list + :param agg_users_result: + :param policy_result: + :return: + """ + if policy_result: + for response in policy_result: + if type(response) == dict: + skip_policy = response.get('Skip') + if skip_policy in ('NA', '', None): + user = response.pop('User').lower() + response['Region'] = region + response['Policy'] = policy_name + if user in self.__kerberos_users.keys(): + user = self.__kerberos_users.get(user) + agg_users_result.setdefault(user, []).append(response) + + def __get_policy_data_in_bucket(self, region: str, policy: str): + """ + This method returns the policy data in s3 bucket + :param region: + :param policy: + :return: + """ + try: + policy_save_path = f'{self.__bucket_key}/{region}/{policy}' + bucket_path_file = self.__s3_operations.get_last_objects(bucket=self.__bucket_name, key_prefix=f'{policy_save_path}/{self.TODAY_DATE}') + policy_s3_response = self.__s3_operations.get_last_s3_policy_content(s3_file_path=bucket_path_file, file_name=self.FILE_NAME) + return json.loads(policy_s3_response) if policy_s3_response else [] + except ClientError as err: + logger.info(err) + return [] + + @logger_time_stamp + def __get_policy_users_list(self): + """ + This method gets the latest policy responses + :return: + """ + agg_users_result = {} + for policy in self.__policies: + run_global_region = True if policy in self.__global_region_policies else False + for region in self.__active_regions: + if (region == self.GLOBAL_REGION and run_global_region) or not run_global_region: + self.__get_users_agg_result(policy_result=self.__get_policy_data_in_bucket(region=region, policy=policy), + agg_users_result=agg_users_result, policy_name=policy, region=region) + if region == self.GLOBAL_REGION and run_global_region: + break + return agg_users_result + + def __get_policy_agg_data_by_region(self, policy_data: dict): + """ + This method returns the policy data agg by region + :param policy_data: + :return: + """ + agg_policy_region_result = {} + for policy_name, policy_region_data in policy_data.items(): + agg_policy_region_result[policy_name] = {} + for region_data in policy_region_data: + region_name = region_data.get('Region').lower() + agg_policy_region_result[policy_name].setdefault(region_name, []).append(region_data) + return agg_policy_region_result + + @logger_time_stamp + def __get_policy_agg_data(self, user_policy_data: list): + """ + This method returns the data agg by policy + :param user_policy_data: + :return: + """ + agg_policy_result = {} + for result in user_policy_data: + policy_name = result.get('Policy').lower() + days = int(result.get('Days', 0)) + if days in self.__mail_alert_days or days in self.__policy_action_days: + result['Action'] = 'Deleted' if days in self.__policy_action_days else 'Monitoring' + result['DeletedDay'] = (datetime.now() + timedelta(days=self.__policy_action_days[0] - days)).date() + agg_policy_result.setdefault(policy_name, []).append(result) + return 
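The alert-day arithmetic in __get_policy_agg_data above decides between a monitoring mail and a deletion notice, and projects DeletedDay from the first action day. A standalone sketch, assuming example values for MAIL_ALERT_DAYS and POLICY_ACTIONS_DAYS (the real values come from environment variables):

from datetime import datetime, timedelta

mail_alert_days, policy_action_days = [4, 6], [7]   # assumed example settings
days = 6                                            # 'Days' from a policy record
if days in mail_alert_days or days in policy_action_days:
    action = 'Deleted' if days in policy_action_days else 'Monitoring'
    deleted_day = (datetime.now() + timedelta(days=policy_action_days[0] - days)).date()
    # -> 'Monitoring', with deletion projected one day out (7 - 6)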
self.__get_policy_agg_data_by_region(policy_data=agg_policy_result) + + @logger_time_stamp + def __send_mail_alerts_to_users(self): + """ + This method send mail alerts to users + :return: + """ + policy_agg_users_list = self.__get_policy_users_list() + for user, user_policy_data in policy_agg_users_list.items(): + handler.setLevel(logging.WARN) + agg_policy_data = self.__get_policy_agg_data(user_policy_data=user_policy_data) + if agg_policy_data: + handler.setLevel(logging.INFO) + subject, body = self.__mail_message.get_agg_policies_mail_message(user=user, user_resources=agg_policy_data) + self.__postfix.send_email_postfix(subject=subject, content=body, to=user, cc=[], mime_type='html') + + @logger_time_stamp + def run(self): + """ + This method start the other methods + :return: + """ + self.__send_mail_alerts_to_users() diff --git a/cloud_governance/policy/gcp/__init__.py b/cloud_governance/policy/gcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/policy/gcp/cost_billing_reports.py b/cloud_governance/policy/gcp/cost_billing_reports.py new file mode 100644 index 00000000..668a54fd --- /dev/null +++ b/cloud_governance/policy/gcp/cost_billing_reports.py @@ -0,0 +1,293 @@ +import json +import os +from datetime import datetime, timedelta +from ast import literal_eval + +from typeguard import typechecked + +from cloud_governance.common.clouds.gcp.google_account import GoogleAccount +from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload +from cloud_governance.common.google_drive.upload_to_gsheet import UploadToGsheet +from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp +from cloud_governance.main.environment_variables import environment_variables +from cloud_governance.common.logger.init_logger import logger + + +class CostBillingReports: + """ + This class is responsible for generation cost billing report for Budget, Actual, Forecast + """ + + DEFAULT_YEARS = 12 + DEFAULT_ROUND_DIGITS = 3 + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self.__database_name = self.__environment_variables_dict.get('GCP_DATABASE_NAME', '') + self.__table_name = self.__environment_variables_dict.get('GCP_DATABASE_TABLE_NAME', '') + self.__gcp_account = GoogleAccount() + self.__gsheet_id = self.__environment_variables_dict.get('SPREADSHEET_ID') + self.__cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() + self.update_to_gsheet = UploadToGsheet() + self.elastic_upload = ElasticUpload() + + @logger_time_stamp + def __next_twelve_months(self): + """ + This method returns the next 12 month, year + :return: + """ + months = 12 + year = datetime.now().year + next_month = datetime.now().month + 1 + month_year = [] + for idx in range(months): + month = str((idx+next_month) % months) + c_year = year + if len(month) == 1: + month = f'0{month}' + if month == '00': + month = 12 + year = year+1 + month_year.append((str(month), c_year)) + return month_year + + @typechecked() + @logger_time_stamp + def __prepare_usage_query(self, first_year_month: str = None, second_year_month: str = None): + """ + This method prepare the query for usage + :param first_year_month: #YYYYMM + :param second_year_month: #YYYYMM + :return: + """ + if not first_year_month and not second_year_month: + current_month = datetime.now().replace(day=1) + past_month = current_month - timedelta(days=1) + first_year_month = past_month.strftime("%Y%m") + second_year_month = 
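__next_twelve_months above walks a month wheel, zero-padding single digits and rolling the year over after December; the reworked IBM report later in this diff uses similar wrap-around arithmetic. An equivalent, more compact formulation:

from datetime import datetime

def next_twelve_months(today: datetime = None):
    """Return the next twelve ('MM', year) pairs, starting from next month."""
    today = today or datetime.now()
    month, year = today.month, today.year
    return [(f'{(month + i - 1) % 12 + 1:02d}', year + (month + i - 1) // 12)
            for i in range(1, 13)]

# e.g. from November 2023: [('12', 2023), ('01', 2024), ..., ('11', 2024)]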
current_month.strftime("%Y%m") + logger.info(f'StartMonth: {first_year_month}, EndMonth: {second_year_month}') + fetch_monthly_invoice_query = f""" + SELECT ifnull(project.ancestors[SAFE_OFFSET(1)].display_name, 'NA') as folder_name, + ifnull(project.ancestry_numbers, 'NA') as folder_id, invoice.month, ifnull(project.id, 'GCP-refund/credit') as project_name, ifnull(project.number, '000000000000') as project_id, + (SUM(CAST(cost AS NUMERIC)) + SUM(IFNULL((SELECT SUM(CAST(c.amount AS NUMERIC)) + FROM UNNEST(credits) AS c), 0))) AS total_cost + FROM `{self.__database_name}.{self.__table_name}` + where invoice.month BETWEEN '{first_year_month}' AND '{second_year_month}' + GROUP BY 1, 2, 3, 4, 5 + ORDER BY 3 + """ + fetch_monthly_folders_query = f""" + SELECT TO_JSON_STRING(project.ancestors) as project_folders, project.number, invoice.month, ifnull(project.ancestry_numbers, 'NA') as folder_id + FROM `{self.__database_name}.{self.__table_name}` + where invoice.month BETWEEN '{first_year_month}' AND '{second_year_month}' GROUP BY 1, 2, 3, 4 ORDER BY invoice.month + """ + return [fetch_monthly_invoice_query, fetch_monthly_folders_query] + + @typechecked() + @logger_time_stamp + def __organized_results(self, data_rows: list): + """ + This method organize the results to be uploaded to elastic search + :param data_rows: + :return: + """ + compress_gcp_data = {} # compress data based on budget_id + for row in data_rows: + month = row.get('Month') + cost_center, allocated_budget, years, owner = 0, 0, '', 'Others' + project_budget_account_id = 0 + for idx, _id in enumerate((row.get('folder_ids')+[row.get('ProjectId')])[::-1]): # start from reverse [root, sub_child, child] + cost_center, allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=_id, dir_path='/tmp') + if cost_center > 0: + project_budget_account_id = _id + break + parent_index = len(row.get("folder_ids")) + index = f'{project_budget_account_id}-{month}' + if index in compress_gcp_data: + compress_gcp_data[index]['Actual'] = round(compress_gcp_data[index]['Actual'] + row.get('Actual'), 3) + compress_gcp_data[index]['Projects'].append({ + 'Project': row.get('Project'), + 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS), + 'ProjectId': row.get('ProjectId') + }) + if parent_index > compress_gcp_data[index]['total_folders']: + compress_gcp_data[index]['Account'] = row.get(f'parent{parent_index}', 'NA') + compress_gcp_data[index]['AccountId'] = row.get(f'parent{parent_index}_id', 'NA') + else: + project_cost_data = {'CloudName': self.__cloud_name, 'CostCenter': cost_center, 'Owner': owner, + 'Budget': round(allocated_budget / self.DEFAULT_YEARS, self.DEFAULT_ROUND_DIGITS), + 'Forecast': 0, + 'AllocatedBudget': round(allocated_budget, self.DEFAULT_ROUND_DIGITS), + 'BudgetId': project_budget_account_id, + 'Account': row.get(f'parent{parent_index}', 'NA'), + 'AccountId': row.get(f'parent{parent_index}_id', 'NA'), + 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS), + 'filter_date': row.get('filter_date'), 'Month': row.get('Month'), + 'start_date': row.get('start_date'), 'timestamp': row.get('timestamp'), + 'Projects': [{'Project': row.get('Project'), + 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS), + 'ProjectId': row.get('ProjectId')}], + 'index_id': f"{row.get('start_date')}-{row.get(f'parent{parent_index}', 'NA').lower()}", + 'total_folders': parent_index} + compress_gcp_data[index] = project_cost_data + return 
self.__second_layer_filter(items=list(compress_gcp_data.values())) + + @typechecked() + @logger_time_stamp + def __second_layer_filter(self, items: list): + """ + This method aggregates the results which have the same Account name + :param items: + :return: + """ + filtered_result = {} + for item in items: + account = item.get('Account') + month = item.get('Month') + index = f'{account}-{month}' + if index in filtered_result: + filtered_result[index]['Budget'] += item.get('Budget') + filtered_result[index]['Actual'] += item.get('Actual') + if item.get('BudgetId') != filtered_result[index]['BudgetId']: + filtered_result[index]['AllocatedBudget'] += item.get('AllocatedBudget') + filtered_result[index]['Projects'].extend(item.get('Projects')) + else: + filtered_result[index] = item + return list(filtered_result.values()) + + # @Todo Add forecast values in future + @typechecked() + @logger_time_stamp + def __forecast_for_next_months(self, cost_data: list): + """ + This method returns the forecast of next twelve months data + :param cost_data: + :return: + """ + forecast_cost_data = [] + month_years = self.__next_twelve_months() + month = (datetime.now().month - 1) % 12 + if month == 0: + month = 12 + if len(str(month)) == 1: + month = f'0{month}' + year = datetime.now().year + cache_start_date = f'{year}-{str(month)}-01' + for data in cost_data: + if cache_start_date == data.get('start_date') and data.get('CostCenter') > 0: + for m_y in month_years: + m, y = m_y[0], m_y[1] + start_date = f'{y}-{m}-01' + timestamp = datetime.strptime(start_date, "%Y-%m-%d") + index_id = f'{start_date}-{data.get("Account").lower()}' + month = datetime.strftime(timestamp, "%Y %b") + projects = [] + for project in data.get('Projects'): + project['Actual'] = 0 + projects.append(project) + forecast_cost_data.append({ + **data, + 'Actual': 0, + 'start_date': start_date, + 'timestamp': timestamp, + 'index_id': index_id, + 'Projects': projects, + 'filter_date': f'{start_date}-{month.split()[-1]}', + 'Month': month} + ) + return forecast_cost_data + + @typechecked() + @logger_time_stamp + def __get_aggregated_folder_details(self, query_data: list): + """ + This method gives the unique folder_names from the data + :param query_data: + :return: + """ + project_folders = {} + for data in query_data: + index = f'{data.get("number")}' + month = data.get('month') + project_folder_id = data.get('folder_id') + insert_data = False + if index not in project_folders: + insert_data = True + else: + insert_data = month >= project_folders.get(index).get('month') + if insert_data: + updated_data = {'month': month, 'folder_id': project_folder_id} + for folders in literal_eval(data.get('project_folders')): + folder_id = folders.get('resource_name').split('/')[-1] + folder_name = folders.get('display_name') + updated_data[folder_id] = folder_name + project_folders[index] = updated_data + return project_folders + + @typechecked() + @logger_time_stamp + def __get_parent_folders(self, folder_ids: list, folders_data: dict, project_id: str): + """ + This method returns the list of parent folders of Project + :param folder_ids: + :param folders_data: + :param project_id: + :return: + """ + parent_folders = {} + for idx, _id in enumerate(folder_ids): + parent_folders.update({ + f'parent{idx + 1}': folders_data[project_id].get(_id), + f'parent{idx + 1}_id': _id + }) + return parent_folders + + @logger_time_stamp + def __get_big_query_data(self): + """ + This method collect the data from the big query and filter the data + :return: + """ + 
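For reference, the fetch_monthly_invoice_query above can be exercised directly with the google-cloud-bigquery client rather than through the repo's GoogleAccount.query_list() wrapper. A trimmed sketch, assuming GOOGLE_APPLICATION_CREDENTIALS is exported and using a placeholder dataset/table:

from google.cloud import bigquery

client = bigquery.Client()     # picks up GOOGLE_APPLICATION_CREDENTIALS
sql = """
SELECT invoice.month,
       IFNULL(project.id, 'GCP-refund/credit') AS project_name,
       SUM(CAST(cost AS NUMERIC)) AS total_cost
FROM `my_dataset.gcp_billing_export`  -- placeholder for {GCP_DATABASE_NAME}.{GCP_DATABASE_TABLE_NAME}
WHERE invoice.month BETWEEN '202301' AND '202302'
GROUP BY 1, 2
ORDER BY 1
"""
for row in client.query(sql).result():
    print(row.month, row.project_name, row.total_cost)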
cost_usage_queries = self.__prepare_usage_query() + query_rows = self.__gcp_account.query_list(cost_usage_queries) + folders_data = self.__get_aggregated_folder_details(query_rows[1]) + agg_data = {} + for cst_row in query_rows[0]: + project_id, bill_month, total_cost = cst_row.get('project_id').strip(), cst_row.get('month'), float(cst_row.get('total_cost')) + folder_ids = folders_data.get(project_id).get('folder_id').split('/')[2:-1] if folders_data.get(project_id) else cst_row.get('folder_id').split('/')[2:-1] + folder_name = cst_row.get('folder_name') + index = f"{project_id}-{bill_month}" + parents_folders = self.__get_parent_folders(folder_ids, folders_data, project_id) if project_id in folders_data else {} + if agg_data.get(index): + total_cost = float(cst_row.get('total_cost')) + agg_data[index]['Actual'] + agg_data[index] = { + 'folder_name': parents_folders.get(f'parent{len(folder_ids)}', 'NA'), + 'start_date': f'{bill_month[:4]}-{bill_month[4:]}-01', + 'Project': cst_row.get('project_name'), 'ProjectId': project_id, 'Actual': total_cost, + 'Account': parents_folders.get('parent1', 'NA'), 'Forecast': 0, 'folder_ids': folder_ids, **parents_folders + } + agg_data[index]['timestamp'] = datetime.strptime(agg_data[index]['start_date'], '%Y-%m-%d') + month = datetime.strftime(agg_data[index]['timestamp'], "%Y %b") + agg_data[index]['Month'] = month + agg_data[index]['filter_date'] = f'{agg_data[index]["start_date"]}-{month.split()[-1]}' + return self.__organized_results(list(agg_data.values())) + + @logger_time_stamp + def __get_cost_and_upload(self): + """ + This method collect the cost and uploads to the ElasticSearch" + :return: + """ + collected_data = self.__get_big_query_data() + forecast_data = self.__forecast_for_next_months(cost_data=collected_data) + upload_data = collected_data + forecast_data + self.elastic_upload.es_upload_data(items=upload_data, set_index='index_id') + + @logger_time_stamp + def run(self): + """ + This method run the gcp cost explorer methods + :return: + """ + self.__get_cost_and_upload() diff --git a/cloud_governance/policy/ibm/cost_billing_reports.py b/cloud_governance/policy/ibm/cost_billing_reports.py index 5e130ad9..efbfe29a 100644 --- a/cloud_governance/policy/ibm/cost_billing_reports.py +++ b/cloud_governance/policy/ibm/cost_billing_reports.py @@ -23,8 +23,7 @@ def __init__(self): self.__ibm_account = IBMAccount() self.__elastic_upload = ElasticUpload() self.update_to_gsheet = UploadToGsheet() - self.owner = self.__environment_variables_dict.get('COST_CENTER_OWNER') - self.cost_center, self.__account_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__ibm_account.short_account_id) + self.cost_center, self.__account_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__ibm_account.short_account_id) def prepare_es_data(self, month: str, year: str, usage_cost: float = 0, next_invoice: float = 0): """This method prepares the data to upload to the es""" @@ -42,7 +41,7 @@ def prepare_es_data(self, month: str, year: str, usage_cost: float = 0, next_inv 'Month': month, 'CostCenter': self.cost_center, 'CloudName': 'IBM Cloud', - 'Owner': self.owner, + 'Owner': self.__owner, 'Forecast': round(next_invoice, 3), 'Actual': round(usage_cost, 3), 'filter_date': f'{start_date}-{month.split()[-1]}', @@ -66,10 +65,17 @@ def get_cost_usage_details(self): if past_usage_cost: es_data = 
self.prepare_es_data(usage_cost=round(past_usage_cost.get('resources').get('billable_cost'), 3), month=str(last_month), year=str(last_month_year)) upload_es_data[es_data['index_id']] = es_data - for next_month in range(month+1, month+11): - new_year = date + relativedelta(month=next_month) - es_data = self.prepare_es_data(month=new_year.strftime("%m"), year=str(new_year.year)) - upload_es_data[es_data['index_id']] = es_data + for next_month in range(self.MONTHS): + next_month = (next_month + month) % self.MONTHS + if next_month != month: + c_year = year + if len(str(next_month)) != 2: + next_month = f'0{next_month}' + if next_month == '00': + year += 1 + next_month = str(12) + es_data = self.prepare_es_data(month=str(next_month), year=str(c_year)) + upload_es_data[es_data['index_id']] = es_data if upload_es_data: self.__elastic_upload.es_upload_data(items=list(upload_es_data.values()), set_index='index_id') return list(upload_es_data.values()) diff --git a/cloud_governance/policy/ibm/ibm_cost_report.py b/cloud_governance/policy/ibm/ibm_cost_report.py index 0af571e7..d0f5bd34 100644 --- a/cloud_governance/policy/ibm/ibm_cost_report.py +++ b/cloud_governance/policy/ibm/ibm_cost_report.py @@ -27,7 +27,7 @@ def __init__(self): @typechecked def collect_tags_from_machines(self, tags: list): """ - This method return tags from list of string tags + This method returns tags from list of string tags @param tags: @return: """ diff --git a/cloud_governance/policy/ibm/tag_vm.py b/cloud_governance/policy/ibm/tag_vm.py index 3462e1fd..b66b9987 100644 --- a/cloud_governance/policy/ibm/tag_vm.py +++ b/cloud_governance/policy/ibm/tag_vm.py @@ -13,7 +13,7 @@ def __init__(self): def get_virtual_machine_username(self, vm_id: str): """ - This method return the virtual machine username from the billing order lists + This method returns the virtual machine username from the billing order lists @param vm_id: @return: """ diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py index a23fb720..51baf46d 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py +++ b/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py @@ -66,7 +66,7 @@ def remove_instance_tags(self, instance_list: list, tags: list): def get_cluster(self, clusters: list): """ - This method return cluster, and it tags + This method returns cluster, and it tags @param clusters: @return: """ @@ -205,7 +205,7 @@ def cluster_images(self, instance_tags: dict): def cluster_snapshot(self, instance_tags: dict): """ - This method return list of cluster's snapshot according to cluster tag name + This method returns list of cluster's snapshot according to cluster tag name @return: """ snapshots_data = self.ec2_operations.get_snapshots() @@ -458,7 +458,7 @@ def cluster_role(self, instance_tags: dict): def cluster_user(self, instance_tags: dict): """ - This method return list of cluster's user according to cluster name + This method returns list of cluster's user according to cluster name @param instance_tags: @return: """ diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py index 2bd5fd97..16a74638 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py +++ 
b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py @@ -1,3 +1,5 @@ +from datetime import datetime + import boto3 from cloud_governance.common.clouds.aws.cloudtrail.cloudtrail_operations import CloudTrailOperations @@ -11,7 +13,7 @@ class TagClusterOperations: This class tags AWS resources """ - def __init__(self, input_tags: dict, cluster_name: str, cluster_prefix: str, region: str, dry_run: str, cluster_only: bool): + def __init__(self, region: str, input_tags: dict = None, cluster_name: str = None, cluster_prefix: str = None, dry_run: str = None, cluster_only: bool = None): self.cluster_only = cluster_only self.cluster_prefix = cluster_prefix self.utils = Utils(region=region) @@ -27,6 +29,7 @@ def __init__(self, input_tags: dict, cluster_name: str, cluster_prefix: str, r self.cloudtrail = CloudTrailOperations(region_name='us-east-1') self._get_username_from_instance_id_and_time = CloudTrailOperations(region_name=region).get_username_by_instance_id_and_time self.dry_run = dry_run + self.iam_users = self.iam_operations.get_iam_users_list() def _input_tags_list_builder(self): """ @@ -67,3 +70,29 @@ def _fill_na_tags(self, user: str = None): else: tags.append({'Key': key, 'Value': value}) return tags + + def get_user_name_from_name_tag(self, tags: list): + """ + This method retuns the username from the name tag verified with iam users + :param tags: + :return: + """ + user_name = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='User') + if user_name in self.iam_users: + return user_name + else: + name_tag = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='Name') + for user in self.iam_users: + if user in name_tag: + return user + return None + + def get_username(self, start_time: datetime, resource_id: str, resource_type: str, tags: list): + """ + This method returns the username + :return: + """ + iam_username = self.get_user_name_from_name_tag(tags=tags) + if not iam_username: + return self._get_username_from_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type) + return iam_username diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py index 5b9fa619..50f69510 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py +++ b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py @@ -12,6 +12,7 @@ class TagClusterResources(TagClusterOperations): """ SHORT_ID = 5 + NA_VALUE = 'NA' def __init__(self, cluster_name: str = None, cluster_prefix: str = None, input_tags: dict = None, region: str = 'us-east-2', dry_run: str = 'yes', cluster_only: bool = False): @@ -107,7 +108,7 @@ def __remove_tags_start_with_aws(self, tags: list): def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_resource_id: str, tags: str = 'Tags'): """ - This method return resource list that related to input resource id according to cluster's tag name and update the tags + This method returns resource list that related to input resource id according to cluster's tag name and update the tags @param resources_list: @param input_resource_id: @param ids: @@ -141,10 +142,7 @@ def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_r for cluster_name, cluster_id in cluster_ids.items(): if self.dry_run == 'no': try: - if self.cluster_name in cluster_name: - 
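The get_user_name_from_name_tag/get_username pair added above establishes a cheap fallback chain: trust an IAM-verified User tag, then substring-match the Name tag against IAM users, and only then pay for a CloudTrail lookup. A condensed sketch of the same chain (names hypothetical):

def resolve_username(tags: list, iam_users: list, cloudtrail_lookup) -> str:
    by_key = {tag['Key']: tag['Value'] for tag in tags}
    user = by_key.get('User')
    if user in iam_users:            # 1) IAM-verified User tag
        return user
    name = by_key.get('Name', '')
    for candidate in iam_users:      # 2) IAM user embedded in the Name tag
        if candidate in name:
            return candidate
    return cloudtrail_lookup()       # 3) last resort: CloudTrail events

iam_users = ['alice', 'bob']
tags = [{'Key': 'Name', 'Value': 'alice-ocp-worker-1'}]
assert resolve_username(tags, iam_users, lambda: 'cloudtrail-user') == 'alice'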
self.ec2_client.create_tags(Resources=cluster_id, Tags=cluster_tags.get(cluster_name)) - else: - self.ec2_client.create_tags(Resources=cluster_id, Tags=cluster_tags.get(cluster_name)) + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=cluster_id, tags=cluster_tags.get(cluster_name)) logger.info(f'{input_resource_id}:: {cluster_name}, count: {len(cluster_id)}, {cluster_id}, {cluster_tags.get(cluster_name)}') except Exception as err: logger.info(err) @@ -153,7 +151,7 @@ def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_r def __generate_cluster_resources_list_by_vpc(self, resources_list: list, input_resource_id: str): """ - This method return resource list that related to input resource id according to cluster's vpc id + This method returns resource list that related to input resource id according to cluster's vpc id @param resources_list: @param input_resource_id: @return: @@ -174,12 +172,12 @@ def __generate_cluster_resources_list_by_vpc(self, resources_list: list, input_r if self.cluster_name: if self.cluster_name in cluster_tag[0].get('Key'): if self.dry_run == 'no': - self.ec2_client.create_tags(Resources=[resource_id], Tags=all_tags) + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[resource_id], tags=all_tags) logger.info(all_tags) result_resources_list.append(resource_id) else: if self.dry_run == 'no': - self.ec2_client.create_tags(Resources=[resource_id], Tags=all_tags) + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[resource_id], tags=all_tags) logger.info(all_tags) result_resources_list.append(resource_id) break @@ -229,11 +227,14 @@ def __validate_existing_tag(self, tags: list): @param tags: @return: """ + check_tags = ['User', 'Project', 'Manager', 'Owner', 'Email'] + tag_count = 0 for tag in tags: - for key, value in self.input_tags.items(): - if tag.get('Key') == key: - return True - return False + if tag.get('Key') in check_tags: + tag_count += 1 + if tag.get('Value') == 'NA': + return False + return tag_count == len(check_tags) def update_cluster_tags(self, resources: list): """ @@ -248,24 +249,22 @@ def update_cluster_tags(self, resources: list): for instance in resources: for item in instance: instance_id = item['InstanceId'] - if item.get('Tags'): + tags = item.get('Tags') + if tags: # search that not exist permanent tags in the resource - if not self.__validate_existing_tag(item.get('Tags')): - for tag in item['Tags']: + if not self.__validate_existing_tag(tags): + for tag in tags: if self.cluster_prefix in tag.get('Key'): add_tags = self.__append_input_tags() cluster_name = tag.get('Key').split('/')[-1] - if cluster_name in cluster_instances: - add_tags = self.__filter_resource_tags_by_add_tags(tags=item.get('Tags'), - search_tags=cluster_tags[ - cluster_name]) + user = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='User') + if cluster_name in cluster_instances and user and user != 'NA': + add_tags = self.__filter_resource_tags_by_add_tags(tags=tags, search_tags=cluster_tags[cluster_name]) if add_tags: cluster_instances[cluster_name].append(instance_id) break else: - username = self._get_username_from_instance_id_and_time( - start_time=item.get('LaunchTime'), resource_id=instance_id, - resource_type='AWS::EC2::Instance') + username = self.get_username(start_time=item.get('LaunchTime'), resource_id=instance_id, resource_type='AWS::EC2::Instance', tags=tags) if username: if username == 'AutoScaling': 
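__validate_existing_tag above is tightened from "any input tag present" to "all mandatory tags present and none set to NA". A sketch with the indentation reconstructed from the flattened hunk:

CHECK_TAGS = ['User', 'Project', 'Manager', 'Owner', 'Email']

def has_valid_tags(tags: list) -> bool:
    tag_count = 0
    for tag in tags:
        if tag.get('Key') in CHECK_TAGS:
            if tag.get('Value') == 'NA':   # a mandatory tag stubbed with NA fails
                return False
            tag_count += 1
    return tag_count == len(CHECK_TAGS)    # every mandatory tag must be present

assert has_valid_tags([{'Key': k, 'Value': 'x'} for k in CHECK_TAGS])
assert not has_valid_tags([{'Key': 'User', 'Value': 'NA'}])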
add_tags.extend(self._fill_na_tags(user=username)) @@ -275,10 +274,14 @@ def update_cluster_tags(self, resources: list): if not self.__check_user_in_username_tags(user_tags): try: user = self.iam_client.get_user(UserName=username)['User'] - username = self.cloudtrail.get_username_by_instance_id_and_time( + temp_username = self.cloudtrail.get_username_by_instance_id_and_time( start_time=user.get('CreateDate'), resource_id=username, resource_type='AWS::IAM::User') - user_tags = self.iam_operations.get_user_tags(username=username) + if temp_username: + add_tags.append({'Key': 'User', 'Value': temp_username}) + user_tags = self.iam_operations.get_user_tags(username=temp_username) + else: + add_tags.append({'Key': 'User', 'Value': username}) except: add_tags.append({'Key': 'User', 'Value': username}) if user_tags: @@ -294,17 +297,13 @@ def update_cluster_tags(self, resources: list): add_tags = self.__filter_resource_tags_by_add_tags(tags=item.get('Tags'), search_tags=add_tags) if add_tags: - cluster_instances[cluster_name] = [instance_id] + cluster_instances.setdefault(cluster_name, []).append(instance_id) cluster_tags[cluster_name] = add_tags break for cluster_instance_name, instance_ids in cluster_instances.items(): if self.dry_run == 'no': try: - if self.cluster_name: - if cluster_instance_name == self.cluster_name: - self.ec2_client.create_tags(Resources=instance_ids, Tags=cluster_tags.get(cluster_instance_name)) - else: - self.ec2_client.create_tags(Resources=instance_ids, Tags=cluster_tags.get(cluster_instance_name)) + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=instance_ids, tags=cluster_tags.get(cluster_instance_name)) logger.info(f'Cluster :: {cluster_instance_name} count: {len(instance_ids)} :: InstanceId :: {instance_ids} :: {cluster_tags.get(cluster_instance_name)}') except Exception as err: logger.info(err) @@ -318,7 +317,7 @@ def update_cluster_tags(self, resources: list): def cluster_instance(self): """ - This method return list of cluster's instance according to cluster tag name, + This method returns list of cluster's instance according to cluster tag name, The instances list is different from other resources it will search for full cluster name (including random suffix string) in case of user input cluster name was given @return: @@ -339,7 +338,7 @@ def cluster_instance(self): def cluster_volume(self): """ - This method return list of cluster's volume according to cluster tag name + This method returns list of cluster's volume according to cluster tag name @return: """ volumes_data = self.ec2_operations.get_volumes() @@ -353,7 +352,7 @@ def cluster_volume(self): def cluster_ami(self): """ - This method return list of cluster's ami according to cluster tag name + This method returns list of cluster's ami according to cluster tag name @return: """ images_data = self.ec2_operations.get_images() @@ -367,7 +366,7 @@ def cluster_ami(self): def cluster_snapshot(self): """ - This method return list of cluster's snapshot according to cluster tag name + This method returns list of cluster's snapshot according to cluster tag name @return: """ snapshots_data = self.ec2_operations.get_snapshots() @@ -381,14 +380,14 @@ def cluster_snapshot(self): def __get_security_group_data(self): """ - This method return security group data + This method returns security group data @return: """ return self.ec2_operations.get_security_groups() def cluster_security_group(self): """ - This method return list of cluster's security group according to cluster tag name + 
This method returns list of cluster's security group according to cluster tag name @return: """ security_group_ids = self.__generate_cluster_resources_list_by_tag( @@ -398,7 +397,7 @@ def cluster_security_group(self): def cluster_elastic_ip(self): """ - This method return list of cluster's elastic ip according to cluster tag name + This method returns list of cluster's elastic ip according to cluster tag name @return: """ elastic_ips_data = self.ec2_operations.get_elastic_ips() @@ -409,7 +408,7 @@ def cluster_elastic_ip(self): def cluster_network_interface(self): """ - This method return list of cluster's network interface according to cluster tag name + This method returns list of cluster's network interface according to cluster tag name @return: """ network_interfaces_data = self.ec2_operations.get_network_interface() @@ -422,7 +421,7 @@ def cluster_network_interface(self): def cluster_load_balancer(self): """ - This method return list of cluster's load balancer according to cluster vpc + This method returns list of cluster's load balancer according to cluster vpc @return: """ result_resources_list = [] @@ -470,7 +469,7 @@ def cluster_load_balancer(self): def cluster_load_balancer_v2(self): """ - This method return list of cluster's load balancer according to cluster vpc + This method returns list of cluster's load balancer according to cluster vpc @return: """ result_resources_list = [] @@ -518,7 +517,7 @@ def cluster_load_balancer_v2(self): def cluster_vpc(self): """ - This method return list of cluster's vpc according to cluster tag name + This method returns list of cluster's vpc according to cluster tag name @return: """ vpcs_data = self.ec2_operations.get_vpcs() @@ -545,7 +544,7 @@ def get_cluster_vpc(self): def cluster_subnet(self): """ - This method return list of cluster's subnet according to cluster tag name + This method returns list of cluster's subnet according to cluster tag name @return: """ subnets_data = self.ec2_operations.get_subnets() @@ -556,7 +555,7 @@ def cluster_subnet(self): def cluster_route_table(self): """ - This method return list of cluster's route table according to cluster tag name + This method returns list of cluster's route table according to cluster tag name @return: """ route_tables_data = self.ec2_operations.get_route_tables() @@ -567,7 +566,7 @@ def cluster_route_table(self): def cluster_internet_gateway(self): """ - This method return list of cluster's route table internet gateway according to cluster tag name + This method returns list of cluster's route table internet gateway according to cluster tag name @return: """ internet_gateways_data = self.ec2_operations.get_internet_gateways() @@ -579,7 +578,7 @@ def cluster_internet_gateway(self): def cluster_dhcp_option(self): """ - This method return list of cluster's dhcp option according to cluster tag name + This method returns list of cluster's dhcp option according to cluster tag name @return: """ dhcp_options_data = self.ec2_operations.get_dhcp_options() @@ -590,7 +589,7 @@ def cluster_dhcp_option(self): def cluster_vpc_endpoint(self): """ - This method return list of cluster's vpc endpoint according to cluster tag name + This method returns list of cluster's vpc endpoint according to cluster tag name @return: """ vpc_endpoints_data = self.ec2_operations.get_vpce() @@ -601,7 +600,7 @@ def cluster_vpc_endpoint(self): def cluster_nat_gateway(self): """ - This method return list of cluster's nat gateway according to cluster tag name + This method returns list of cluster's nat gateway according to 
cluster tag name @return: """ nat_gateways_data = self.ec2_operations.get_nat_gateways() @@ -612,7 +611,7 @@ def cluster_nat_gateway(self): def cluster_network_acl(self): """ - This method return list of cluster's network acl according to cluster vpc id + This method returns list of cluster's network acl according to cluster vpc id Missing OpenShift Tags for it based on VPCs @return: """ @@ -624,7 +623,7 @@ def cluster_network_acl(self): def cluster_role(self, cluster_names: list = []): """ - This method return list of cluster's role according to cluster name + This method returns list of cluster's role according to cluster name @param cluster_names: @return: """ @@ -674,7 +673,7 @@ def cluster_role(self, cluster_names: list = []): def cluster_user(self, cluster_names: list = []): """ - This method return list of cluster's user according to cluster name + This method returns list of cluster's user according to cluster name @param cluster_names: @return: """ @@ -730,13 +729,22 @@ def __filter_resource_tags_by_add_tags(self, tags: list, search_tags: list): for search_tag in search_tags: found = False for tag in tags: - if tag.get('Key') == search_tag.get('Key'): + if tag.get('Key') == search_tag.get('Key') and tag.get('Value') != 'NA': found = True + break if not found: add_tags.append(search_tag) else: add_tags.extend(search_tags) - return add_tags + filter_tags = {} + for tag in add_tags: + key = tag.get('Key') + value = tag.get('Value') + if key in filter_tags and filter_tags[key].get('Value') == self.NA_VALUE: + filter_tags[key] = {'Key': key, 'Value': value} + else: + filter_tags[key] = {'Key': key, 'Value': value} + return list(filter_tags.values()) def __remove_launchTime(self, tags: list): """ @@ -748,7 +756,7 @@ def __remove_launchTime(self, tags: list): def cluster_s3_bucket(self, cluster_names: list = []): """ - This method return list of cluster's s3 bucket according to cluster name + This method returns list of cluster's s3 bucket according to cluster name @param cluster_names: @return: """ diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py index 8db6b2eb..f7b3519d 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py +++ b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py @@ -10,6 +10,8 @@ class NonClusterOperations: + NA_VALUE = 'NA' + def __init__(self, region: str = 'us-east-2', dry_run: str = 'yes', input_tags: dict = ''): self.region = region self.dry_run = dry_run @@ -21,6 +23,7 @@ def __init__(self, region: str = 'us-east-2', dry_run: str = 'yes', input_tags: self.iam_client = IAMOperations() self.ec2_operations = EC2Operations(region=region) self.utils = Utils(region=region) + self.iam_users = self.iam_client.get_iam_users_list() def _get_instances_data(self, instance_id: str = ''): """ @@ -63,13 +66,21 @@ def _get_tags_of_resources(self, tags: list, search_tags: list): for search_tag in search_tags: found = False for tag in tags: - if tag.get('Key') == search_tag.get('Key'): + if tag.get('Key') == search_tag.get('Key') and tag.get('Value') != 'NA': found = True if not found: add_tags.append(search_tag) else: add_tags.extend(search_tags) - return add_tags + filter_tags = {} + for tag in add_tags: + key = tag.get('Key') + value = tag.get('Value') + if key in filter_tags and filter_tags[key].get('Value') == self.NA_VALUE: + filter_tags[key] = {'Key': 
key, 'Value': value} + else: + filter_tags[key] = {'Key': key, 'Value': value} + return list(filter_tags.values()) def _fill_na_tags(self, user: str = None): """ @@ -89,15 +100,15 @@ def _fill_na_tags(self, user: str = None): tags.append({'Key': key, 'Value': value}) return tags - def _get_username_from_cloudtrail(self, start_time: datetime, resource_id: str, resource_type: str): + def _get_username_from_cloudtrail(self, start_time: datetime, resource_id: str, resource_type: str, end_time: datetime = None): """ - This method return username fom cloudtrail + This method returns username fom cloudtrail @param start_time: @param resource_id: @param resource_type: @return: """ - return self.cloudtrail.get_username_by_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type) + return self.cloudtrail.get_username_by_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type, end_time=end_time) def _get_resource_data(self, resource_method: callable): """ @@ -121,7 +132,7 @@ def _convert_datetime_format(self, date_time: datetime): def _build_tag(self, key: str, value: any): """ - This method return Key value pair + This method returns Key value pair @param key: @param value: @return: @@ -152,7 +163,7 @@ def _get_tags_from_instance_item(self, instance_item: dict): def _get_tags_fom_attachments(self, attachments: list): """ - This method return tags from attachments + This method returns tags from attachments @param attachments: @return: """ @@ -183,4 +194,43 @@ def _get_tags_from_snapshot_description_images(self, description: str): username = self._get_username_from_cloudtrail(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami') return tags, username + def get_user_name_from_name_tag(self, tags: list = None, resource_name: str = None): + """ + This method retuns the username from the name tag verified with iam users + :param resource_name: + :param tags: + :return: + """ + name_tag = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='Name') if tags else resource_name + for user in self.iam_users: + if user in name_tag: + return user + return None + + def get_username(self, start_time: datetime, resource_id: str, resource_type: str, tags: list, resource_name: str = '', end_time: datetime = None): + """ + This method returns the username + :return: + """ + iam_username = self.get_user_name_from_name_tag(tags=tags, resource_name=resource_name) + if not iam_username: + iam_username = self.get_user_name_from_name_tag(resource_name=resource_name) + if not iam_username: + return self._get_username_from_cloudtrail(start_time=start_time, resource_id=resource_id, resource_type=resource_type, end_time=end_time) + return iam_username + def validate_existing_tag(self, tags: list): + """ + This method validates that permanent tag exists in tags list + @param tags: + @return: + """ + check_tags = ['User', 'Project', 'Manager', 'Owner', 'Email'] + tag_count = 0 + if tags: + for tag in tags: + if tag.get('Key') in check_tags: + tag_count += 1 + if tag.get('Value') == 'NA': + return False + return tag_count == len(check_tags) diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py index 69b97b5d..0b14d684 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py +++ 
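Both _get_tags_of_resources above and __filter_resource_tags_by_add_tags in the previous file now apply the same merge rule: a search tag is (re)applied when the resource lacks it or carries it with value NA, and the merged list is de-duplicated by key. A condensed sketch:

NA = 'NA'

def merge_tags(existing: list, search: list) -> list:
    # Keep a search tag unless the resource already has that key with a real value.
    add_tags = [s for s in search
                if not any(t['Key'] == s['Key'] and t['Value'] != NA for t in existing)]
    deduped = {}
    for tag in add_tags:                  # last write wins, replacing NA stubs
        deduped[tag['Key']] = tag
    return list(deduped.values())

existing = [{'Key': 'User', 'Value': NA}]
search = [{'Key': 'User', 'Value': 'alice'}, {'Key': 'Owner', 'Value': 'bob'}]
assert merge_tags(existing, search) == search   # NA User is replaced, Owner added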
b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta from cloud_governance.common.logger.init_logger import logger from cloud_governance.policy.policy_operations.aws.tag_non_cluster.non_cluster_operations import NonClusterOperations @@ -36,7 +36,7 @@ def __get_instance_tags(self, launch_time: datetime, instance_id: str, tags: lis @param tags: @return: """ - username = self._get_username_from_cloudtrail(start_time=launch_time, resource_id=instance_id, resource_type='AWS::EC2::Instance') + username = self.get_username(start_time=launch_time, resource_id=instance_id, resource_type='AWS::EC2::Instance', tags=tags) search_tags = [] user_tags = [] if not username: @@ -67,15 +67,17 @@ def non_cluster_update_ec2(self, instances_list: list = None): for item in instance: instance_id = item.get('InstanceId') launch_time = item.get('LaunchTime') - add_tags = self.__get_instance_tags(launch_time=launch_time, instance_id=instance_id, tags=item.get('Tags')) - if add_tags: - if self.dry_run == 'no': - try: - self.ec2_client.create_tags(Resources=[instance_id], Tags=add_tags) - logger.info(f'Added tags to instance: {instance_id} total: {len(add_tags)} tags: {add_tags}') - except Exception as err: - logger.info(err) - instances_ids.append(instance_id) + tags = item.get('Tags') + if not self.validate_existing_tag(tags=tags): + add_tags = self.__get_instance_tags(launch_time=launch_time, instance_id=instance_id, tags=tags) + if add_tags: + if self.dry_run == 'no': + try: + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[instance_id], tags=add_tags) + logger.info(f'Added tags to instance: {instance_id} total: {len(add_tags)} tags: {add_tags}') + except Exception as err: + logger.info(err) + instances_ids.append(instance_id) logger.info(f'non_cluster_ec2 count: {len(sorted(instances_ids))} {sorted(instances_ids)}') return sorted(instances_ids) @@ -90,37 +92,39 @@ def update_volumes(self, volumes_data: list = None): volume_ids = [] for volume in volumes_data: volume_id = volume.get('VolumeId') - username = self._get_username_from_cloudtrail(start_time=volume.get('CreateTime'), resource_id=volume_id, resource_type='AWS::EC2::Volume') - search_tags = [] - if not username: - get_tags, username = self._get_tags_fom_attachments(attachments=volume.get('Attachments')) - search_tags.extend(get_tags) - else: - search_tags.extend(self._append_input_tags()) - if username: - user_tags = self.iam_client.get_user_tags(username=username) - if not user_tags: - search_tags.extend(self._fill_na_tags(user=username)) + tags = volume.get('Tags') + if not self.validate_existing_tag(tags=tags): + username = self.get_username(start_time=volume.get('CreateTime'), resource_id=volume_id, resource_type='AWS::EC2::Volume', tags=tags) + search_tags = [] + if not username: + get_tags, username = self._get_tags_fom_attachments(attachments=volume.get('Attachments')) + search_tags.extend(get_tags) else: - search_tags.extend(user_tags) - search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) - search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime'))) - else: - search_tags.extend(self._fill_na_tags()) - search_tags.extend(self._append_input_tags()) - search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime'))) - if not self.__check_name_in_tags(volume.get('Tags')): - tag_name = 
f'{username}-{volume_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{volume_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{volume_id[-self.SHORT_RESOURCE_ID:]}' - search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) - volume_tags = self._get_tags_of_resources(tags=volume.get('Tags'), search_tags=search_tags) - if volume_tags: - if self.dry_run == 'no': - try: - self.ec2_client.create_tags(Resources=[volume_id], Tags=volume_tags) - logger.info(f'added tags to volume_id: {volume_id} total: {len(volume_tags)} tags: {volume_tags}') - except Exception as err: - logger.info(err) - volume_ids.append(volume_id) + search_tags.extend(self._append_input_tags()) + if username: + user_tags = self.iam_client.get_user_tags(username=username) + if not user_tags: + search_tags.extend(self._fill_na_tags(user=username)) + else: + search_tags.extend(user_tags) + search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) + search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime'))) + else: + search_tags.extend(self._fill_na_tags()) + search_tags.extend(self._append_input_tags()) + search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime'))) + if not self.__check_name_in_tags(volume.get('Tags')): + tag_name = f'{username}-{volume_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{volume_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{volume_id[-self.SHORT_RESOURCE_ID:]}' + search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) + volume_tags = self._get_tags_of_resources(tags=volume.get('Tags'), search_tags=search_tags) + if volume_tags: + if self.dry_run == 'no': + try: + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[volume_id], tags=volume_tags) + logger.info(f'added tags to volume_id: {volume_id} total: {len(volume_tags)} tags: {volume_tags}') + except Exception as err: + logger.info(err) + volume_ids.append(volume_id) logger.info(f'non_cluster_volumes count: {len(sorted(volume_ids))} {sorted(volume_ids)}') return sorted(volume_ids) @@ -135,43 +139,51 @@ def update_snapshots(self, snapshots: list = None): snapshot_ids = [] for snapshot in snapshots: snapshot_id = snapshot.get('SnapshotId') - username = self._get_username_from_cloudtrail(start_time=snapshot.get('StartTime'), resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot') - search_tags = [] - if not username: - if snapshot.get('Description') and 'Created' in snapshot.get('Description'): - image_tags, username = self._get_tags_from_snapshot_description_images(description=snapshot.get('Description')) - if not username: - instance_id = snapshot.get('Description').split(" ")[2].split("(")[1][:-1] - instances = self._get_instances_data(instance_id) - if instances: - for item in instances: - if item.get('InstanceId') == instance_id: - item_tags, username = self._get_tags_from_instance_item(instance_item=item) - else: - search_tags.extend(self._append_input_tags()) - if username: - user_tags = self.iam_client.get_user_tags(username=username) - search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) - if not user_tags: - search_tags.extend(self._fill_na_tags(user=username)) + tags = snapshot.get('Tags') + if not self.validate_existing_tag(tags=tags): + username = self.get_username(start_time=snapshot.get('StartTime'), resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot', tags=tags) + if username and 'vm_import_image' in username: + start_time = snapshot.get('StartTime') + timedelta(seconds=5) + end_time = start_time + 
timedelta(minutes=30) + assume_username = self.get_username(start_time=start_time, resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot', tags=tags, end_time=end_time) + if assume_username: + username = assume_username + search_tags = [] + if not username: + if snapshot.get('Description') and 'Created' in snapshot.get('Description'): + image_tags, username = self._get_tags_from_snapshot_description_images(description=snapshot.get('Description')) + if not username: + instance_id = snapshot.get('Description').split(" ")[2].split("(")[1][:-1] + instances = self._get_instances_data(instance_id) + if instances: + for item in instances: + if item.get('InstanceId') == instance_id: + item_tags, username = self._get_tags_from_instance_item(instance_item=item) else: - search_tags.extend(user_tags) - else: - search_tags.extend(self._fill_na_tags()) - search_tags.extend(self._append_input_tags()) - if not self.__check_name_in_tags(snapshot.get('Tags')): - tag_name = f'{username}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{snapshot_id[:self.SHOT_SNAPSHOT_ID]}-{self.region}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' - search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) - search_tags.append(self._build_tag(key='LaunchTime', value=snapshot.get('StartTime'))) - snapshot_tags = self._get_tags_of_resources(tags=snapshot.get('Tags'), search_tags=search_tags) - if snapshot_tags: - if self.dry_run == 'no': - try: - self.ec2_client.create_tags(Resources=[snapshot_id], Tags=snapshot_tags) - logger.info(f'added tags to snapshots: {snapshot_id} total: {len(snapshot_tags)} tags: {snapshot_tags}') - except Exception as err: - logger.info(err) - snapshot_ids.append(snapshot_id) + search_tags.extend(self._append_input_tags()) + if username: + user_tags = self.iam_client.get_user_tags(username=username) + search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) + if not user_tags: + search_tags.extend(self._fill_na_tags(user=username)) + else: + search_tags.extend(user_tags) + else: + search_tags.extend(self._fill_na_tags()) + search_tags.extend(self._append_input_tags()) + if not self.__check_name_in_tags(snapshot.get('Tags')): + tag_name = f'{username}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{snapshot_id[:self.SHOT_SNAPSHOT_ID]}-{self.region}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' + search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) + search_tags.append(self._build_tag(key='LaunchTime', value=snapshot.get('StartTime'))) + snapshot_tags = self._get_tags_of_resources(tags=snapshot.get('Tags'), search_tags=search_tags) + if snapshot_tags: + if self.dry_run == 'no': + try: + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[snapshot_id], tags=snapshot_tags) + logger.info(f'added tags to snapshots: {snapshot_id} total: {len(snapshot_tags)} tags: {snapshot_tags}') + except Exception as err: + logger.info(err) + snapshot_ids.append(snapshot_id) logger.info(f'non_cluster_snapshot count: {len(sorted(snapshot_ids))} {sorted(snapshot_ids)}') return sorted(snapshot_ids) @@ -187,31 +199,34 @@ def update_ami(self, images: list = None): image_ids = [] for image in images: image_id = image.get('ImageId') + tags = image.get('Tags') + image_name = image.get('Name') start_time = datetime.fromisoformat(image.get('CreationDate')[:-1] + '+00:00') - username = self._get_username_from_cloudtrail(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami') - search_tags = [] - search_tags.extend(self._append_input_tags()) 
- if username: - user_tags = self.iam_client.get_user_tags(username=username) - search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) - if not user_tags: - search_tags.extend(self._fill_na_tags(user=username)) + if not self.validate_existing_tag(tags=tags): + username = self.get_username(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami', tags=tags, resource_name=image_name) + search_tags = [] + search_tags.extend(self._append_input_tags()) + if username: + user_tags = self.iam_client.get_user_tags(username=username) + search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'}) + if not user_tags: + search_tags.extend(self._fill_na_tags(user=username)) + else: + search_tags.extend(user_tags) else: - search_tags.extend(user_tags) - else: - search_tags.extend(self._fill_na_tags()) - if not self.__check_name_in_tags(image.get('Tags')): - tag_name = f'{username}-{image_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{image_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{image_id[-self.SHORT_RESOURCE_ID:]}' - search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) - search_tags.append(self._build_tag(key='LaunchTime', value=start_time)) - image_tags = self._get_tags_of_resources(tags=image.get('Tags'), search_tags=search_tags) - if image_tags: - if self.dry_run == 'no': - try: - self.ec2_client.create_tags(Resources=[image_id], Tags=image_tags) - logger.info(f'added tags to image: {image_id} total: {len(image_tags)} tags: {image_tags}') - except Exception as err: - logger.info(err) - image_ids.append(image_id) + search_tags.extend(self._fill_na_tags()) + if not self.__check_name_in_tags(image.get('Tags')): + tag_name = f'{username}-{image_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{image_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{image_id[-self.SHORT_RESOURCE_ID:]}' + search_tags.append({'Key': 'cg-Name', 'Value': tag_name}) + search_tags.append(self._build_tag(key='LaunchTime', value=start_time)) + image_tags = self._get_tags_of_resources(tags=image.get('Tags'), search_tags=search_tags) + if image_tags: + if self.dry_run == 'no': + try: + self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[image_id], tags=image_tags) + logger.info(f'added tags to image: {image_id} total: {len(image_tags)} tags: {image_tags}') + except Exception as err: + logger.info(err) + image_ids.append(image_id) logger.info(f'non_cluster_amis count: {len(sorted(image_ids))} {sorted(image_ids)}') return sorted(image_ids) diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py index 0ef1f548..8a037eca 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py +++ b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py @@ -46,7 +46,7 @@ def __get_resource_ids(self, resource_id: str, resource_name: str): def __get_key_value(self, key, value): """ - This method return key-value pairs + This method returns key-value pairs :param key: :param value: :return: diff --git a/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py b/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py index eb7229cf..e359c61a 100644 --- a/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py +++ b/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py @@ -1,4 +1,5 @@ import csv 
+import os.path import re from ast import literal_eval @@ -77,6 +78,7 @@ def __write_into_csv_file(self, tag_keys: list, tag_values: dict): else: file.write(' , ') file.write('\n') + logger.info(f'Generated the file: {self.file_name}') def generate_user_csv(self): """ @@ -89,7 +91,7 @@ def generate_user_csv(self): tag_values = {} for user in users: user_name = user.get('UserName') - if '-' not in user_name: + if user_name.count('-') <= 3: user_tags = self.IAMOperations.get_user_tags(username=user_name) tag_values[user_name] = {} for tag in user_tags: @@ -105,8 +107,6 @@ def generate_user_csv(self): break tag_keys = list(sorted(tag_keys)) self.__write_into_csv_file(tag_keys=tag_keys, tag_values=tag_values) - with open(self.file_name) as file: - logger.info(file.read()) def __filter_tags_user_tags(self, user_tags: list, append_tags: list): """ @@ -121,8 +121,7 @@ def __filter_tags_user_tags(self, user_tags: list, append_tags: list): found = False for user_tag in user_tags: if user_tag.get('Key').strip() == append_tag.get('Key').strip(): - if user_tag.get('Value').strip() == append_tag.get('Value').strip(): - found = True + found = True if not found: add_tags.append(append_tag) else: @@ -176,30 +175,55 @@ def update_user_tags(self): """ count = 0 updated_usernames = [] - with open(self.file_name) as file: - csvreader = csv.reader(file) - header = next(csvreader) - rows = [] - for row in csvreader: - rows.append(row) - json_data = self.__get_json_data(header, rows) - for key, tags in json_data.items(): - try: - user_tags = self.IAMOperations.get_user_tags(username=key) - tags.append({'Key': 'User', 'Value': key}) - tags.extend(self.get_user_details_from_ldap(user_name=key)) - filter_tags = self.__filter_tags_user_tags(user_tags, tags) - if filter_tags: - self.iam_client.tag_user(UserName=key, Tags=filter_tags) - logger.info(f'Username :: {key} {filter_tags}') + if os.path.exists(self.file_name): + with open(self.file_name) as file: + csvreader = csv.reader(file) + header = next(csvreader) + rows = [] + for row in csvreader: + rows.append(row) + json_data = self.__get_json_data(header, rows) + for key, tags in json_data.items(): + if self.tag_iam_user_tags(username=key, tags=tags): updated_usernames.append(key) count += 1 - except Exception as err: - logger.info(err) + else: + users_list = self.get_detail_resource_list(func_name=self.iam_client.list_users, input_tag='Users', + check_tag='Marker') + for user in users_list: + username = user.get('UserName') + if username.count('-') <= 3: # usernames with more than 3 hyphens are assumed to be cluster users + if self.tag_iam_user_tags(username=username): + updated_usernames.append(username) + count += 1 logger.info(f'Updated Tags of IAM Users = {count} :: Usernames {updated_usernames}') return count - def __format_tags(self, username: str, headers: list): + def tag_iam_user_tags(self, username: str, tags: list = None): + """ + This method applies the given tags to the IAM user + :param tags: + :param username: + :return: + """ + try: + if not tags: + tags = [] + user_tags = self.IAMOperations.get_user_tags(username=username) + tags.append({'Key': 'User', 'Value': username}) + tags.extend(self.get_user_details_from_ldap(user_name=username)) + filter_tags = self.__filter_tags_user_tags(user_tags, tags) + if filter_tags: + self.iam_client.tag_user(UserName=username, Tags=filter_tags) + logger.info(f'Username :: {username} {filter_tags}') + return True + except Exception as err: + logger.error(err) + return False + + def __format_tags(self, username: str, headers: list = 
None): + if not headers: + headers = ['User'] tags = {'User': username} user_tags = self.IAMOperations.get_user_tags(username=username) for user_tag in user_tags: @@ -212,19 +236,32 @@ def delete_update_user_from_doc(self): This method removes IAM user if not in the IAM list @return: """ - iam_file = pd.read_csv(self.file_name) - iam_users = [user['UserName'] for user in self.IAMOperations.get_users()] - csv_iam_users = list(iam_file['User']) - for index, user in enumerate(csv_iam_users): - if user not in iam_users: - self.__google_drive_operations.delete_rows(spreadsheet_id=self.__SPREADSHEET_ID, - sheet_name=self.__sheet_name, row_number=index + 1) - logger.info(f'removed user {user}') + self.__google_drive_operations.create_work_sheet(gsheet_id=self.__SPREADSHEET_ID, sheet_name=self.__sheet_name) + iam_users = [user['UserName'] for user in + self.get_detail_resource_list(func_name=self.iam_client.list_users, input_tag='Users', + check_tag='Marker') if user['UserName'].count('-') <= 3] + csv_iam_users = [] + iam_file = pd.DataFrame(columns=['User', "Project"]) + if os.path.exists(self.file_name): + iam_file = pd.read_csv(self.file_name) + if not iam_file.empty: + csv_iam_users = list(iam_file['User']) + for index, user in enumerate(csv_iam_users): + if user not in iam_users: + self.__google_drive_operations.delete_rows(spreadsheet_id=self.__SPREADSHEET_ID, + sheet_name=self.__sheet_name, row_number=index + 1) + logger.info(f'removed user {user}') + else: + iam_file = pd.DataFrame(columns=['User']) append_data = [] for user in iam_users: - if '-' not in user: + if user.count('-') <= 3: if user not in csv_iam_users: - tags = self.__format_tags(username=user, headers=list(iam_file.columns)) + if not iam_file.empty: + tags = self.__format_tags(username=user, headers=list(iam_file.columns)) + else: + append_data.append(['User']) + tags = self.__format_tags(username=user) df2 = pd.DataFrame.from_dict([tags]) iam_file = pd.concat([iam_file, df2], ignore_index=True) iam_file = iam_file.fillna('') diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py index 895c41be..2c866520 100644 --- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py +++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py @@ -165,8 +165,9 @@ def __delete_load_balancer_v2(self, resource_id: str): @typeguard.typechecked def __delete_volume(self, resource_id: str): try: - self.client.delete_volume(VolumeId=resource_id) - logger.info(f'delete_volume: {resource_id}') + logger.info(f'Cluster volumes are handled by ebs_unattached') + # self.client.delete_volume(VolumeId=resource_id) + # logger.info(f'delete_volume: {resource_id}') except Exception as err: logger.exception(f'Cannot delete_volume: {resource_id}, {err}') @@ -260,22 +261,31 @@ def __delete_security_group(self, resource_id: str, vpc_id: str): security_groups = self.ec2_operations.get_security_groups() vpc_security_groups = self.__get_cluster_references(resource_id=vpc_id, resource_list=security_groups, input_resource_id='VpcId', output_result='') for vpc_security_group in vpc_security_groups: - if vpc_security_group.get('GroupName') == 'default': - if vpc_security_group.get('IpPermissions'): - for ip_permission in vpc_security_group.get('IpPermissions'): - self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ip_permission]) - 
logger.info(f'Removed the Ingress rules of Security Group {resource_id} :: {ip_permission}') - else: - if vpc_security_group.get('Tags'): - if self.__is_cluster_resource(tags=vpc_security_group.get('Tags'), cluster_tag=self.cluster_tag): - logger.info(vpc_security_group.get('GroupId')) - if vpc_security_group.get('IpPermissions'): - for ip_permission in vpc_security_group.get('IpPermissions'): - if ip_permission.get('UserIdGroupPairs'): - for user_id_group_pair in ip_permission.get('UserIdGroupPairs'): - if user_id_group_pair.get('GroupId') == resource_id: - self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ip_permission]) - logger.info(f'Removed the Ingress rules of Security Group {resource_id} from {vpc_security_group.get("GroupId")}') + if resource_id != vpc_security_group.get('GroupId'): + if vpc_security_group.get('GroupName') == 'default': + logger.info(f'Removing the {resource_id} ingress rule from Default Security Group: {vpc_security_group.get("GroupId")}') + if vpc_security_group.get('IpPermissions'): + for ip_permission in vpc_security_group.get('IpPermissions'): + if ip_permission.get('UserIdGroupPairs'): + for user_id_group_pair in ip_permission.get('UserIdGroupPairs'): + if user_id_group_pair.get('GroupId') == resource_id: + ingress_rule = {'FromPort': ip_permission.get('FromPort'), 'IpProtocol': ip_permission.get('IpProtocol'), 'IpRanges': ip_permission.get('IpRanges'), 'Ipv6Ranges': ip_permission.get('Ipv6Ranges'), 'PrefixListIds': ip_permission.get('PrefixListIds'), 'ToPort': ip_permission.get('ToPort'), 'UserIdGroupPairs': [user_id_group_pair]} + self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ingress_rule]) + logger.info(f'Removed the Ingress rules of Security Group {vpc_security_group.get("GroupId")} :: {ingress_rule}') + else: + if vpc_security_group.get('Tags'): + if self.__is_cluster_resource(tags=vpc_security_group.get('Tags'), cluster_tag=self.cluster_tag): + logger.info(vpc_security_group.get('GroupId')) + if vpc_security_group.get('IpPermissions'): + for ip_permission in vpc_security_group.get('IpPermissions'): + if ip_permission.get('UserIdGroupPairs'): + for user_id_group_pair in ip_permission.get('UserIdGroupPairs'): + if user_id_group_pair.get('GroupId') == resource_id: + ingress_rule = {'FromPort': ip_permission.get('FromPort'), 'IpProtocol': ip_permission.get('IpProtocol'), 'IpRanges': ip_permission.get('IpRanges'), + 'Ipv6Ranges': ip_permission.get('Ipv6Ranges'), 'PrefixListIds': ip_permission.get('PrefixListIds'), + 'ToPort': ip_permission.get('ToPort'), 'UserIdGroupPairs': [user_id_group_pair]} + self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ingress_rule]) + logger.info(f'Removed the Ingress rules of Security Group {resource_id} from {ingress_rule}') network_interfaces = self.ec2_operations.get_network_interface() network_interface_ids = self.__get_cluster_references(resource_id=vpc_id, resource_list=network_interfaces, input_resource_id='VpcId', diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py index af4d93dc..b7f2c35e 100644 --- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py +++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py @@ -82,7 +82,7 @@ def 
__get_resource_list(region, delete: bool = False, resource: str = '', cluste def zombie_cluster_resource(delete: bool = False, region: str = 'us-east-2', resource: str = '', cluster_tag: str = '', resource_name: str = '', service_type: str = ''): """ - This method return zombie cluster resources, + This method returns zombie cluster resources, How its works? if not exist an instance cluster, the resource is zombie if delete true it will delete the zombie resource :return: list of zombie resources diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py index 0eb4df60..3c545d4f 100644 --- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py +++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py @@ -15,7 +15,7 @@ class ZombieClusterCommonMethods: DAYS_TO_TRIGGER_RESOURCE_MAIL = 4 - DAYS_TO_DELETE_RESOURCE = 7 + DAYS_TO_DELETE_RESOURCE = environment_variables.environment_variables_dict.get('DAYS_TO_DELETE_RESOURCE') def __init__(self, region: str, force_delete: bool = False): self.__environment_variables_dict = environment_variables.environment_variables_dict @@ -32,6 +32,7 @@ def __init__(self, region: str, force_delete: bool = False): self.__ldap_host_name = self.__environment_variables_dict.get('LDAP_HOST_NAME', '') self._special_user_mails = self.__environment_variables_dict.get('special_user_mails', '{}') self._account_admin = self.__environment_variables_dict.get('account_admin', '') + self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT') if self.__environment_variables_dict.get('EMAIL_ALERT') else False self._ldap = LdapSearch(ldap_host_name=self.__ldap_host_name) self._mail = Postfix() self._mail_description = MailMessage() @@ -45,7 +46,7 @@ def _literal_eval(self, data: any): def get_tag_name_from_tags(self, tags: list, tag_name: str): """ - This method return tag_name from resource_tags + This method returns tag_name from resource_tags @param tags: @param tag_name: @return: @@ -75,7 +76,7 @@ def get_zombie_cluster_user_tag(self, zombies: dict, resources: list, resource_i def _get_tags_of_zombie_resources(self, resources: list, resource_id_name: str, zombies: dict, aws_service: str, aws_tag: str = 'Tags'): """ - This method return tags of the resource i.e {resource_id: tags} + This method returns tags of the resource i.e {resource_id: tags} @param resources: @param tags: @return: @@ -177,7 +178,7 @@ def update_resource_tags(self, tags: list, tag_name: str, tag_value: str): def get_cluster_delete_days(self, tags: list) -> int: """ - This method return the ClusterDeleteDays tag + This method returns the ClusterDeleteDays tag @param tags: @return: """ @@ -188,7 +189,6 @@ def get_cluster_delete_days(self, tags: list) -> int: cluster_delete_days = int(cluster_delete_days) + 1 return cluster_delete_days - @logger_time_stamp def trigger_mail(self, tags: list, resource_id: str, days: int, resources: list, message_type: str): """ This method send triggering mail @@ -254,7 +254,6 @@ def collect_notify_cluster_data(self, resource_data: dict, cluster_left_out_days delete_data.setdefault(cluster_tag, []).append({func_name: delete_tag_data[cluster_tag]}) return notify_data, delete_data, cluster_data - @logger_time_stamp def send_mails_to_cluster_user(self, notify_data: dict, delete_data: dict, cluster_data: dict): """ This method send mail to the 
user to notify cluster status @@ -263,17 +262,17 @@ def send_mails_to_cluster_user(self, notify_data: dict, delete_data: dict, clust @param delete_data: @return: """ - for cluster_tag, resource_ids in notify_data.items(): - self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag) - self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag, - days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL, - resources=resource_ids, message_type='notification') - for cluster_tag, resource_ids in delete_data.items(): - self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag) - self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag, - days=self.DAYS_TO_DELETE_RESOURCE, resources=resource_ids, message_type='delete') + if self.__email_alert: + for cluster_tag, resource_ids in notify_data.items(): + self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag) + self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag, + days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL, + resources=resource_ids, message_type='notification') + for cluster_tag, resource_ids in delete_data.items(): + self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag) + self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag, + days=self.DAYS_TO_DELETE_RESOURCE, resources=resource_ids, message_type='delete') - @logger_time_stamp def _check_zombie_cluster_deleted_days(self, resources: dict, cluster_left_out_days: dict, zombie: str, cluster_tag: str): """ This method check the cluster delete days and return the clusters diff --git a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py index ae13efbf..c5f37a9d 100644 --- a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py +++ b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py @@ -9,6 +9,7 @@ from cloud_governance.common.clouds.aws.price.resources_pricing import ResourcesPricing from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload +from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations from cloud_governance.common.ldap.ldap_search import LdapSearch from cloud_governance.common.logger.init_logger import logger from cloud_governance.common.mails.mail_message import MailMessage @@ -19,7 +20,7 @@ class NonClusterZombiePolicy: - DAYS_TO_DELETE_RESOURCE = 7 + DAYS_TO_DELETE_RESOURCE = environment_variables.environment_variables_dict.get('DAYS_TO_DELETE_RESOURCE') DAYS_TO_NOTIFY_ADMINS = 6 DAYS_TO_TRIGGER_RESOURCE_MAIL = 4 DAILY_HOURS = 24 @@ -48,9 +49,13 @@ def __init__(self): self._mail_description = MailMessage() self.__ldap_host_name = self.__environment_variables_dict.get('LDAP_HOST_NAME', '') self._ldap = LdapSearch(ldap_host_name=self.__ldap_host_name) + self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT') if self.__environment_variables_dict.get('EMAIL_ALERT') else False + self.__manager_email_alert = self.__environment_variables_dict.get('MANAGER_EMAIL_ALERT') self._admins = ['athiruma@redhat.com', 'ebattat@redhat.com'] self._es_upload = ElasticUpload() self.resource_pricing = 
ResourcesPricing() + self._es_operations = ElasticSearchOperations() + self._es_index = self.__environment_variables_dict.get('es_index') def set_dryrun(self, value: str): self._dry_run = value @@ -95,7 +100,7 @@ def _get_tag_name_from_tags(self, tags: list, tag_name: str = 'Name'): def _calculate_days(self, create_date: datetime): """ - This method return the days + This method returns the days @return: """ today = datetime.date.today() @@ -164,30 +169,32 @@ def _trigger_mail(self, tags: list, resource_id: str, days: int, resource_type: @param resource_id: @return: """ - try: - special_user_mails = self._literal_eval(self._special_user_mails) - user, resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_tag_name_from_tags( - tags=tags, tag_name='Name') - if not resource_name: - resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='cg-Name') - to = user if user not in special_user_mails else special_user_mails[user] - ldap_data = self._ldap.get_user_details(user_name=to) - cc = [self._account_admin, f'{ldap_data.get("managerId")}@redhat.com'] - name = to - if ldap_data: - name = ldap_data.get('displayName') - subject, body = self._mail_description.resource_message(name=name, days=days, - notification_days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL, - delete_days=self.DAYS_TO_DELETE_RESOURCE, - resource_name=resource_name, resource_id=resource_id, - resource_type=resource_type, msgadmins=self.DAYS_TO_NOTIFY_ADMINS, extra_purse=kwargs.get('extra_purse')) - if not kwargs.get('admins'): - self._mail.send_email_postfix(to=to, content=body, subject=subject, cc=cc, resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0)) - else: - kwargs['admins'].append(f'{ldap_data.get("managerId")}@redhat.com') - self._mail.send_email_postfix(to=kwargs.get('admins'), content=body, subject=subject, cc=[], resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0)) - except Exception as err: - logger.info(err) + if self.__email_alert: + try: + special_user_mails = self._literal_eval(self._special_user_mails) + user, resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_tag_name_from_tags( + tags=tags, tag_name='Name') + if not resource_name: + resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='cg-Name') + to = user if user not in special_user_mails else special_user_mails[user] + ldap_data = self._ldap.get_user_details(user_name=to) + cc = [self._account_admin, f'{ldap_data.get("managerId")}@redhat.com'] if self.__manager_email_alert else [] + name = to + if ldap_data: + name = ldap_data.get('displayName') + subject, body = self._mail_description.resource_message(name=name, days=days, + notification_days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL, + delete_days=self.DAYS_TO_DELETE_RESOURCE, + resource_name=resource_name, resource_id=resource_id, + resource_type=resource_type, msgadmins=self.DAYS_TO_NOTIFY_ADMINS, extra_purse=kwargs.get('extra_purse')) + if not kwargs.get('admins'): + self._mail.send_email_postfix(to=to, content=body, subject=subject, cc=cc, resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0)) + else: + if self.__manager_email_alert: + kwargs['admins'].append(f'{ldap_data.get("managerId")}@redhat.com') + self._mail.send_email_postfix(to=kwargs.get('admins'), content=body, subject=subject, cc=[], resource_id=resource_id, message_type=kwargs.get('message_type'), 
extra_purse=kwargs.get('delta_cost', 0)) + except Exception as err: + logger.info(err) def _update_tag_value(self, tags: list, tag_name: str, tag_value: str): """ @@ -231,7 +238,7 @@ def __delete_resource_on_name(self, resource_id: str): self._ec2_client.delete_volume(VolumeId=resource_id) elif self._policy == 'ip_unattached': self._ec2_client.release_address(AllocationId=resource_id) - elif self._policy == 'nat_gateway_unused': + elif self._policy == 'unused_nat_gateway': self._ec2_client.delete_nat_gateway(NatGatewayId=resource_id) elif self._policy == 'zombie_snapshots': self._ec2_client.delete_snapshot(SnapshotId=resource_id) @@ -287,7 +294,7 @@ def _update_resource_tags(self, resource_id: str, left_out_days: int, tags: list self._s3_client.put_bucket_tagging(Bucket=resource_id, Tagging={'TagSet': tags}) elif self._policy == 'empty_roles': self._iam_client.tag_role(RoleName=resource_id, Tags=tags) - elif self._policy in ('ip_unattached', 'nat_gateway_unused', 'zombie_snapshots'): + elif self._policy in ('ip_unattached', 'unused_nat_gateway', 'zombie_snapshots'): self._ec2_client.create_tags(Resources=[resource_id], Tags=tags) except Exception as err: logger.info(f'Exception raised: {err}: {resource_id}') diff --git a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py index cc3bcdf0..2e22b291 100644 --- a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py +++ b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py @@ -1,5 +1,6 @@ import importlib import inspect +from datetime import datetime from cloud_governance.common.logger.init_logger import logger from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy @@ -26,9 +27,22 @@ def run(self): else: logger.info(f'key: {cls[0]}, count: {len(response)}, {response}') policy_result = response + + if self._es_operations.check_elastic_search_connection(): + if policy_result: + for policy_dict in policy_result: + policy_dict['region_name'] = self._region + policy_dict['account'] = self._account + self._es_operations.upload_to_elasticsearch(data=policy_dict, index=self._es_index) + logger.info(f'Uploaded the policy results to elasticsearch index: {self._es_index}') + else: + logger.error(f'No data to upload on @{self._account} at {datetime.utcnow()}') + else: + logger.error('ElasticSearch host is not pingable, Please check ') + if self._policy_output: - if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run'): - beautify_data = self._beautify_upload_data(upload_resource_data=response) - policy_result = {'count': len(beautify_data), self._policy: beautify_data} + # if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run', 's3_inactive', 'zombie_snapshots', 'nat_gateway_unused'): + # beautify_data = self._beautify_upload_data(upload_resource_data=response) + # policy_result = {'count': len(beautify_data), self._policy: beautify_data} logger.info(policy_result) self._s3operations.save_results_to_s3(policy=self._policy.replace('_', '-'), policy_output=self._policy_output, policy_result=policy_result) diff --git a/cloud_governance/policy/policy_operations/gcp/__init__.py b/cloud_governance/policy/policy_operations/gcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py b/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py new file mode 100644 index 00000000..01675d33 --- /dev/null +++ b/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py @@ -0,0 +1,29 @@ +import importlib +import inspect + +from cloud_governance.common.jira.jira import logger +from cloud_governance.main.environment_variables import environment_variables + + +class GcpPolicyRunner: + """ + This class runs the GCP policies + """ + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self._policy = self.__environment_variables_dict.get('policy') + self._cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') + + def run(self): + """ + Run the GCP policies + @return: + """ + gcp_policies = importlib.import_module(f'cloud_governance.policy.gcp.{self._policy}') + logger.info(f'Account: {self._cloud_name}, Policy: {self._policy}') + for cls in inspect.getmembers(gcp_policies, inspect.isclass): + if self._policy.replace('_', '') == cls[0].lower(): + response = cls[1]().run() + if isinstance(response, list) and len(response) > 0: + logger.info(f'key: {cls[0]}, count: {len(response)}, {response}') diff --git a/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py b/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py index 31cf3670..a559fac4 100644 --- a/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py +++ b/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py @@ -39,7 +39,7 @@ def __delete_gitleaks_report(self): def __get_gitleaks_report(self): """ - This method return dict report content + This method returns dict report content """ report_file = self.__report_file_full_path if os.path.isfile(report_file): diff --git a/cloud_governance/policy/policy_runners/__init__.py b/cloud_governance/policy/policy_runners/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloud_governance/policy/policy_runners/common_policy_runner.py b/cloud_governance/policy/policy_runners/common_policy_runner.py new file mode 100644 index 00000000..4f54fd7a --- /dev/null +++ b/cloud_governance/policy/policy_runners/common_policy_runner.py @@ -0,0 +1,29 @@ +import importlib +import inspect + +from cloud_governance.common.logger.init_logger import logger +from cloud_governance.main.environment_variables import environment_variables + + +class CommonPolicyRunner: + """ + This class runs the Common policies + """ + + def __init__(self): + self.__environment_variables_dict = environment_variables.environment_variables_dict + self._policy = self.__environment_variables_dict.get('policy') + self._cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') + + def run(self): + """ + Run the Common policies + @return: + """ + common_policies = importlib.import_module(f'cloud_governance.policy.common_policies.{self._policy}') + logger.info(f'Account: {self._cloud_name}, Policy: {self._policy}') + for cls in inspect.getmembers(common_policies, inspect.isclass): + if self._policy.replace('_', '') == cls[0].lower(): + response = cls[1]().run() + if isinstance(response, list) and len(response) > 0: + logger.info(f'key: {cls[0]}, count: {len(response)}, {response}') diff --git a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py 
new file mode 100644 index 00000000..700f12fa --- /dev/null +++ b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py @@ -0,0 +1,74 @@ +import json +import os + +import boto3 +from pkg_resources import resource_filename + + +class InstanceTypes: + + def __init__(self): + self.ec2_client = boto3.client('ec2', region_name='us-east-1') + self.__client = boto3.client('pricing', region_name='us-east-1') + + def get_instance_types(self, region_name: str): + """This method fetches all instance types""" + instance_types = [] + ec2_client = boto3.client('ec2', region_name=region_name) + response = ec2_client.describe_instance_types() + instance_types.extend([ins_type['InstanceType'] for ins_type in response.get('InstanceTypes')]) + while 'NextToken' in response: + response = ec2_client.describe_instance_types(NextToken=response.get('NextToken')) + instance_types.extend([ins_type['InstanceType'] for ins_type in response.get('InstanceTypes')]) + return sorted(instance_types) + + def get_region_name(self, region_code): + """ + This method returns the region name + @param region_code: + @return: + """ + default_region = 'us-east-1' + endpoint_file = resource_filename('botocore', 'data/endpoints.json') + try: + with open(endpoint_file, 'r') as f: + data = json.load(f) + return data['partitions'][0]['regions'][region_code]['description'] + except IOError: + return default_region + + def instance_price(self, region_name: str, instance_type: str): + """This method gives the price of an instance type in a region""" + FLT = '[{{"Field": "tenancy", "Value": "shared", "Type": "TERM_MATCH"}},' \ + '{{"Field": "operatingSystem", "Value": "Linux", "Type": "TERM_MATCH"}},' \ + '{{"Field": "preInstalledSw", "Value": "NA", "Type": "TERM_MATCH"}},' \ + '{{"Field": "instanceType", "Value": "{t}", "Type": "TERM_MATCH"}},' \ + '{{"Field": "location", "Value": "{r}", "Type": "TERM_MATCH"}},' \ + '{{"Field": "capacitystatus", "Value": "Used", "Type": "TERM_MATCH"}}]' + f = FLT.format(r=self.get_region_name(region_name), t=instance_type) + try: + data = self.__client.get_products(ServiceCode='AmazonEC2', Filters=json.loads(f)) + od = json.loads(data['PriceList'][0])['terms']['OnDemand'] + id1 = list(od)[0] + id2 = list(od[id1]['priceDimensions'])[0] + return od[id1]['priceDimensions'][id2]['pricePerUnit']['USD'] + except Exception as err: + return 0 + + def instance_prices(self): + """This method gets the instance prices based on instance_type""" + # regions = self.ec2_client.describe_regions()['Regions'] + # aws_pricing = {} + # for region in regions: + region_pricing = {} + instance_types = self.get_instance_types(region_name='us-west-2') + for instance_type in instance_types: + price = self.instance_price(region_name='us-west-2', instance_type=instance_type) + if float(price) > 0: + region_pricing[instance_type] = round(float(price), 4) + # aws_pricing[region['RegionName']] = region_pricing + with open('instances_price.json', 'w') as file: + json.dump(region_pricing, file, indent=4) + + +# InstanceTypes().instance_prices() diff --git a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json index 0e21982e..dc99790d 100644 --- a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json +++ b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json @@ -123,6 +123,7 @@ "c6in.4xlarge": 0.9072, "c6in.8xlarge": 1.8144, "c6in.large": 0.1134, 
+ "c6in.metal": 7.2576, "c6in.xlarge": 0.2268, "c7g.12xlarge": 1.74, "c7g.16xlarge": 2.32, @@ -131,8 +132,8 @@ "c7g.8xlarge": 1.16, "c7g.large": 0.0725, "c7g.medium": 0.0363, + "c7g.metal": 2.32, "c7g.xlarge": 0.145, - "cc2.8xlarge": 2.0, "d2.2xlarge": 1.38, "d2.4xlarge": 2.76, "d2.8xlarge": 5.52, @@ -206,6 +207,12 @@ "i3en.large": 0.226, "i3en.metal": 10.848, "i3en.xlarge": 0.452, + "i4g.16xlarge": 4.9421, + "i4g.2xlarge": 0.6178, + "i4g.4xlarge": 1.2355, + "i4g.8xlarge": 2.471, + "i4g.large": 0.1544, + "i4g.xlarge": 0.3089, "i4i.16xlarge": 5.491, "i4i.2xlarge": 0.686, "i4i.32xlarge": 10.9824, @@ -363,6 +370,7 @@ "m6idn.4xlarge": 1.273, "m6idn.8xlarge": 2.5459, "m6idn.large": 0.1591, + "m6idn.metal": 10.1837, "m6idn.xlarge": 0.3182, "m6in.12xlarge": 3.3415, "m6in.16xlarge": 4.4554, @@ -372,7 +380,17 @@ "m6in.4xlarge": 1.1138, "m6in.8xlarge": 2.2277, "m6in.large": 0.1392, + "m6in.metal": 8.9107, "m6in.xlarge": 0.2785, + "m7g.12xlarge": 1.9584, + "m7g.16xlarge": 2.6112, + "m7g.2xlarge": 0.3264, + "m7g.4xlarge": 0.6528, + "m7g.8xlarge": 1.3056, + "m7g.large": 0.0816, + "m7g.medium": 0.0408, + "m7g.metal": 2.6112, + "m7g.xlarge": 0.1632, "p2.16xlarge": 14.4, "p2.8xlarge": 7.2, "p2.xlarge": 0.9, @@ -510,6 +528,7 @@ "r6idn.4xlarge": 1.5631, "r6idn.8xlarge": 3.1262, "r6idn.large": 0.1954, + "r6idn.metal": 12.505, "r6idn.xlarge": 0.3908, "r6in.12xlarge": 4.1839, "r6in.16xlarge": 5.5786, @@ -519,7 +538,17 @@ "r6in.4xlarge": 1.3946, "r6in.8xlarge": 2.7893, "r6in.large": 0.1743, + "r6in.metal": 11.1571, "r6in.xlarge": 0.3487, + "r7g.12xlarge": 2.5704, + "r7g.16xlarge": 3.4272, + "r7g.2xlarge": 0.4284, + "r7g.4xlarge": 0.8568, + "r7g.8xlarge": 1.7136, + "r7g.large": 0.1071, + "r7g.medium": 0.0536, + "r7g.metal": 3.4272, + "r7g.xlarge": 0.2142, "t1.micro": 0.02, "t2.2xlarge": 0.3712, "t2.large": 0.0928, @@ -551,6 +580,7 @@ "t4g.xlarge": 0.1344, "trn1.2xlarge": 1.3438, "trn1.32xlarge": 21.5, + "trn1n.32xlarge": 24.78, "u-12tb1.112xlarge": 109.2, "u-18tb1.112xlarge": 163.8, "u-3tb1.56xlarge": 27.3, @@ -602,4 +632,4 @@ "z1d.large": 0.186, "z1d.metal": 4.464, "z1d.xlarge": 0.372 -} +} \ No newline at end of file diff --git a/cloudsensei/README.md b/cloudsensei/README.md new file mode 100644 index 00000000..03dc3967 --- /dev/null +++ b/cloudsensei/README.md @@ -0,0 +1,88 @@ +## CloudSensei + +CloudSensei is an effort to uncover potential cloud resource leaks that might lead to inefficient cloud management. Currently, CloudSensei helps generate a daily report that allows stakeholders to act on long-running EC2 instances. + +#### To-Do: +Eventually, CloudSensei will be converted into a Slack bot, allowing users to join a “read-only” Slack channel to review daily expense reports, for instance. + +### How it works +To implement this functionality, CloudSensei utilizes AWS Lambda + EventBridge. +The EventBridge Scheduler (cron job) runs every day at 17:00 IST. + + +#### How to send notifications to Slack? +1. Create a new Slack bot in your Slack workspace and add it to the desired channel +2. Generate an [OAuth](https://api.slack.com/authentication/token-types#bot) token for the Slack bot +3. Use [Block Kit](https://api.slack.com/block-kit) to build message formats. +4. Use the Slack [postMessage API](https://api.slack.com/methods/chat.postMessage) to post messages to the Slack channel + +#### Steps to create a Slack bot: +1. Go to [api.slack.com](https://api.slack.com/) +2. Click on **Your apps** and click on **Manage your apps**. +3. Click on **Create New App**. +4. Select **Create from scratch**. + 1. 
Enter the necessary fields and create the app +5. A **Basic Information** tab will open; select **Bots**. +6. Click on **OAuth & Permissions** on the left panel + 1. Click on + 2. Under the scopes, add only **Bot Token Scopes** + 3. Under **OAuth Tokens for Your Workspace**, click **Install to Workspace** and allow access. + +#### Adding the Slack bot to your channel + +To create a Lambda function and integrate it with EventBridge, you must export some env variables: + +Fill in the env.txt file to export the environment variables. + +To store the data of long-running instances in Elasticsearch, +export the variables below +```commandline +ACCOUNT_ID=$ +AWS_DEFAULT_REGION=$ +RESOURCE_DAYS=$ +SEND_AGG_MAIL=$ +ES_SERVER=$ +``` + +To send mail about the long-running instances, +export the variables below +```commandline +ACCOUNT_ID=$ +AWS_DEFAULT_REGION=$ +RESOURCE_DAYS=$ +SES_HOST_ADDRESS=$ +SES_HOST_PORT=$ +SES_USER_ID=$ +SES_PASSWORD=$ +TO_ADDRESS=$ +CC_ADDRESS=$ +``` + +To send notifications to the Slack channel, +export the variables below +```commandline +ACCOUNT_ID=$ +AWS_DEFAULT_REGION=$ +RESOURCE_DAYS=$ +SLACK_API_TOKEN=$ +SLACK_CHANNEL_NAME=$ +``` + +Note: Use env.txt to export the above variables. + +```commandline +git clone https://github.com/redhat-performance/cloud-governance +cd cloud-governance/cloudsensei/ +./run.sh deploy +# Keep a backup of the tfstate file; in case it is deleted, we cannot retrieve it +``` + +To delete the Lambda + EventBridge service [** you must have the tfstate file] +```commandline +cd cloudsensei +./run.sh destroy +``` + +##### Limits of BlockKit + +1. Can only send 50 items per block. diff --git a/cloudsensei/agg_lambda/lambda_function.py b/cloudsensei/agg_lambda/lambda_function.py new file mode 100644 index 00000000..cfd96c5f --- /dev/null +++ b/cloudsensei/agg_lambda/lambda_function.py @@ -0,0 +1,43 @@ +import json +import logging +import os +from datetime import datetime +from time import time + +from es_operations import ESOperations +from send_email import send_email_with_ses + + +def lambda_handler(event, context): + """ + This lambda function sends an aggregated mail report of long-running resources on AWS Cloud + :param event: + :param context: + :return: + """ + start_time = time() + logging.info(f"{lambda_handler.__name__} started at {datetime.utcnow()}") + aws_accounts = ["perf-dept", "openshift-perfscale", "openshift-psap"] + code = 400 + message = "Something went wrong; check your es_data" + es_operations = ESOperations() + email_body = "" + subject = "Weekly Cloud Report: Long running instances in the Perf&Scale AWS Accounts" + for account in aws_accounts: + current_date = str(datetime.utcnow().date()) + index_id = f"{account}-{current_date}" + es_data = es_operations.get_es_data_by_id(index_id) + if es_data: + email_body += f"
<h3>Cloud Report: Long running instances in the @{account} account</h3>" + email_body += es_data.get('_source').get('body') + email_body += "<br/>
    " + response = send_email_with_ses(body=email_body, subject=subject, to=os.environ.get('TO_ADDRESS'), cc=os.environ.get('CC_ADDRESS')) + if response: + code = 200 + message = "Successfully sent an emails" + end_time = time() + return { + 'statusCode': code, + 'body': json.dumps(message), + 'total_running_time': f"{end_time - start_time} s" + } diff --git a/cloudsensei/agg_lambda/run.sh b/cloudsensei/agg_lambda/run.sh new file mode 100755 index 00000000..81cbffbf --- /dev/null +++ b/cloudsensei/agg_lambda/run.sh @@ -0,0 +1,22 @@ +PROJECT_NAME="AggFunction" +SUCCESS_OUTPUT_PATH="/dev/null" +ERROR_LOG="$(mktemp -d)/stderr.log" + + +echo "Clearing if previously created zip file" +PROJECT_PATH="$PWD/$PROJECT_NAME.zip" +if [ -f $PROJECT_PATH ]; then + rm -rf $PROJECT_PATH + rm -rf ./package + echo "Deleted Previously created zip file" +fi + +pip install --upgrade pip +pip install --target ./package -r ../requirements.txt > $SUCCESS_OUTPUT_PATH +pushd package +zip -r ../$PROJECT_NAME.zip . > $SUCCESS_OUTPUT_PATH +popd +zip -g $PROJECT_NAME.zip lambda_function.py > $SUCCESS_OUTPUT_PATH +zip -g $PROJECT_NAME.zip ../es_operations.py > $SUCCESS_OUTPUT_PATH +zip -g $PROJECT_NAME.zip ../send_email.py > $SUCCESS_OUTPUT_PATH +aws lambda update-function-code --function-name CloudSenseiAggFunction --zip-file fileb://$PROJECT_PATH --region $AWS_DEFAULT_REGION > $SUCCESS_OUTPUT_PATH diff --git a/cloudsensei/email_template.j2 b/cloudsensei/email_template.j2 new file mode 100644 index 00000000..5c4019b9 --- /dev/null +++ b/cloudsensei/email_template.j2 @@ -0,0 +1,26 @@ +{% set style="border-collapse:collapse;border:2px solid black;padding: 10px" %} +
+<table style="{{style}}"> +    <thead> +    <tr> +    {% for keys in keys_list %} +        <th style="{{style}}">{{keys}}</th> +    {% endfor %} +    </tr> +    </thead> +    <tbody> +    {% for user, region_list in resources_list.items() %} +    {% for region_name, resources_list in region_list.items() %} +    {% for resources in resources_list %} +    {% set _ = resources.update({'User': user, 'Region': region_name}) %} +    <tr> +    {% for key in keys_list %} +        <td style="{{style}}">{{resources[key]}}</td> +    {% endfor %} +    </tr> +    {% endfor %} +    {% endfor %} +    {% endfor %} +    </tbody> +</table>
\ No newline at end of file diff --git a/cloudsensei/env.sh b/cloudsensei/env.sh new file mode 100644 index 00000000..47aca7ac --- /dev/null +++ b/cloudsensei/env.sh @@ -0,0 +1,14 @@ +export ACCOUNT_ID=${ACCOUNT_ID:-""} +export AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-"us-east-1"} +export SEND_AGG_MAIL=${SEND_AGG_MAIL:-"yes"} +export SLACK_API_TOKEN=${SLACK_API_TOKEN:-""} +export SLACK_CHANNEL_NAME=${SLACK_CHANNEL_NAME:-""} +export SES_HOST_ADDRESS=${SES_HOST_ADDRESS:-""} +export SES_HOST_PORT=${SES_HOST_PORT:-0} +export SES_USER_ID=${SES_USER_ID:-""} +export SES_PASSWORD=${SES_PASSWORD:-""} +export TO_ADDRESS=${TO_ADDRESS:-""} +export CC_ADDRESS=${CC_ADDRESS:-""} +export RESOURCE_DAYS=${RESOURCE_DAYS:-7} +export ES_SERVER=${ES_SERVER:-""} +export S3_BUCKET=${S3_BUCKET:-""} diff --git a/cloudsensei/es_operations.py b/cloudsensei/es_operations.py new file mode 100644 index 00000000..1407bb6f --- /dev/null +++ b/cloudsensei/es_operations.py @@ -0,0 +1,45 @@ +import logging +import os +from datetime import datetime + +from elasticsearch import Elasticsearch + + +class ESOperations: + """ + This class performs es operations + """ + + ES_INDEX = "cloudsensei" + ES_DOC = '_doc' + + def __init__(self): + self.__es_server = os.environ.get('ES_SERVER') + self.__es = Elasticsearch(self.__es_server) + + def upload_to_es(self, data: dict, **kwargs): + """ + This method uploads data to es + :return: + """ + if not data.get('timestamp'): + data['timestamp'] = datetime.utcnow() + # Upload data to elastic search server + try: + self.__es.index(index=self.ES_INDEX, doc_type=self.ES_DOC, body=data, **kwargs) + return True + except Exception as err: + raise err + + def get_es_data_by_id(self, es_id: str): + """ + This method fetches the data from es based on the given id + :param es_id: + :return: + """ + try: + es_data = self.__es.get(index=self.ES_INDEX, id=es_id) + except Exception as err: + logging.error(err) + es_data = {} + return es_data diff --git a/cloudsensei/lambda_function.py b/cloudsensei/lambda_function.py new file mode 100644 index 00000000..0fda44f8 --- /dev/null +++ b/cloudsensei/lambda_function.py @@ -0,0 +1,212 @@ +import json +import logging +import os +from time import time + +import boto3 +from datetime import datetime +from jinja2 import Template + +from es_operations import ESOperations +from send_email import send_email_with_ses +from slack_operations import SlackOperations + + +class EC2Operations: + """ + This class performs the ec2 operations + """ + + SLACK_ITEM_SIZE = 50 + + def __init__(self): + self.__ec2_client = boto3.client('ec2', region_name='us-east-1') + self.__iam_client = boto3.client('iam') + self.__resource_days = int(os.environ.get('RESOURCE_DAYS', 7)) + + def set_ec2_client(self, region_name: str): + """ + This method changes the ec2_client object to another region + :param region_name: + :return: + """ + self.__ec2_client = boto3.client('ec2', region_name=region_name) + + def __get_all_instances(self): + """ + This method returns all instances in a region + :return: + """ + resource_list = [] + resources = self.__ec2_client.describe_instances() + resource_list.extend(resources['Reservations']) + while 'NextToken' in resources.keys(): + resources = self.__ec2_client.describe_instances(NextToken=resources['NextToken']) + resource_list.extend(resources['Reservations']) + return resource_list + + def get_resources(self): + """ + This method returns all the instances running for more than the configured number of days + :return: + """ + regions = 
diff --git a/cloudsensei/lambda_function.py b/cloudsensei/lambda_function.py
new file mode 100644
index 00000000..0fda44f8
--- /dev/null
+++ b/cloudsensei/lambda_function.py
@@ -0,0 +1,212 @@
+import json
+import logging
+import os
+from time import time
+
+import boto3
+from datetime import datetime
+from jinja2 import Template
+
+from es_operations import ESOperations
+from send_email import send_email_with_ses
+from slack_operations import SlackOperations
+
+
+class EC2Operations:
+    """
+    This class performs the ec2 operations
+    """
+
+    SLACK_ITEM_SIZE = 50
+
+    def __init__(self):
+        self.__ec2_client = boto3.client('ec2', region_name='us-east-1')
+        self.__iam_client = boto3.client('iam')
+        self.__resource_days = int(os.environ.get('RESOURCE_DAYS', 7))
+
+    def set_ec2_client(self, region_name: str):
+        """
+        This method changes the ec2_client object to another region
+        :param region_name:
+        :return:
+        """
+        self.__ec2_client = boto3.client('ec2', region_name=region_name)
+
+    def __get_all_instances(self):
+        """
+        This method returns all instances in a region
+        :return:
+        """
+        resource_list = []
+        resources = self.__ec2_client.describe_instances()
+        resource_list.extend(resources['Reservations'])
+        while 'NextToken' in resources.keys():
+            resources = self.__ec2_client.describe_instances(NextToken=resources['NextToken'])
+            resource_list.extend(resources['Reservations'])
+        return resource_list
+
+    def get_resources(self):
+        """
+        This method returns all the instances running for more than RESOURCE_DAYS days
+        :return:
+        """
+        regions = self.__ec2_client.describe_regions()['Regions']
+        current_datetime = datetime.utcnow().date()
+        long_running_instances_by_user = {}
+        for region in regions:
+            region_name = region['RegionName']
+            self.set_ec2_client(region_name)
+            instances_list = self.__get_all_instances()
+            for instances in instances_list:
+                for resource in instances['Instances']:
+                    skip = False
+                    launch_time = resource.get('LaunchTime').date()
+                    days = (current_datetime - launch_time).days
+                    if days > self.__resource_days:
+                        user = name = None
+                        for tag in resource.get('Tags', []):
+                            tag_key = tag.get('Key').lower()
+                            if tag_key == 'cloudsensei':
+                                skip = True
+                                break
+                            if tag_key == 'user':
+                                user = tag.get('Value')
+                            elif tag_key == 'name':
+                                name = tag.get('Value')
+                        if not skip and user:
+                            long_running_instances_by_user.setdefault(user.lower(), {}).setdefault(region_name, []).append(
+                                {'InstanceId': resource.get('InstanceId'),
+                                 'Name': name, 'LaunchDate': str(launch_time),
+                                 'RunningDays': f"{days} days", 'State': resource.get('State', {}).get('Name')})
+        return long_running_instances_by_user
+
+    def get_account_alias_name(self):
+        """
+        This method returns the account alias name
+        :return:
+        """
+        response = self.__iam_client.list_account_aliases()
+        account_alias = response['AccountAliases'][0]
+        return account_alias
+
+    def organize_message_to_send_slack(self, resources_list: dict):
+        """
+        This method returns the message blocks to send to slack
+        :param resources_list:
+        :return:
+        """
+
+        divider = {"type": "divider"}
+        keys_list = ['User', 'Region', 'Name', 'InstanceId', 'LaunchDate', 'RunningDays']
+        rows = []
+        for user, region_list in resources_list.items():
+            for region_name, region_resources in region_list.items():
+                for resources in region_resources:
+                    if resources:
+                        resources.update({'User': user, 'Region': region_name})
+                        rows.append({
+                            "type": "section",
+                            "fields": [{"type": "mrkdwn", "text": f"{str(resources.get(item))}"} for item in keys_list],
+                        })
+                        rows.append(divider)
+        item_blocks = [rows[i:i + self.SLACK_ITEM_SIZE] for i in range(0, len(rows), self.SLACK_ITEM_SIZE)]  # splitting because a slack block allows only 50 items
+        slack_message_block = []
+        for block in item_blocks:
+            slack_message_block.append(block)
+        return slack_message_block
+
+    def organize_message_to_send_mail(self, resources_list: dict):
+        """
+        This method returns the mail message body
+        :param resources_list:
+        :return:
+        """
+        keys_list = ['User', 'Region', 'Name', 'InstanceId', 'LaunchDate', 'State', 'RunningDays']
+        with open('email_template.j2') as template:
+            template = Template(template.read())
+        body = template.render({'resources_list': resources_list, 'keys_list': keys_list})
+        return body
+
+
+class ProcessData:
+    def __init__(self, subject):
+        self.__subject = subject
+
+    def send_email(self, organized_ec2_data):
+        """
+        This method sends the email
+        :return:
+        """
+        response = send_email_with_ses(body=organized_ec2_data, subject=self.__subject, to=os.environ.get('TO_ADDRESS'),
+                                       cc=os.environ.get('CC_ADDRESS'))
+        if response:
+            return 200, "Successfully sent the email"
+        return 400, 'Something went wrong'
+
+    def save_to_elastic_search(self, organized_ec2_data, account_name):
+        """
+        This method saves the data in Elasticsearch
+        :return:
+        """
+        es_operations = ESOperations()
+        data = {
+            'body': organized_ec2_data,
+            'subject': self.__subject,
+            'index_id': f"{account_name.lower()}-{str(datetime.utcnow().date())}"
+        }
+        if es_operations.upload_to_es(data=data, id=data.get('index_id')):
+            return 200, "Successfully saved data in Elasticsearch"
in elastic search" + return 400, 'Something went wrong' + + def post_message_in_slack(self, slack_blocks, account_name): + """ + This method posts message in slack + :return: + """ + slack_operations = SlackOperations() + thread_ts = slack_operations.create_thread(account_name=account_name) + code = 400 + message = 'Something went wrong, while posting to slack' + if thread_ts: + message = slack_operations.post_message_blocks_in_thread(message_blocks=slack_blocks, thread_ts=thread_ts) + code = 200 + return code, message + + +def lambda_handler(event, context): + """ + This lambda function sends notifications to slack on lon running resources on AWS Cloud + :param event: + :param context: + :return: + """ + start_time = time() + logging.info(f"{lambda_handler.__name__} started at {datetime.utcnow()}") + code = 400 + message = "Something went wrong while sending the Notification" + extra_message = '' + ec2_operations = EC2Operations() + account_name = ec2_operations.get_account_alias_name() + process_data = ProcessData(subject=f'Daily Cloud Report: Long running instances in the @{account_name} account') + if os.environ.get("SLACK_API_TOKEN"): + slack_blocks = ec2_operations.organize_message_to_send_slack(ec2_operations.get_resources()) + code, message = process_data.post_message_in_slack(slack_blocks=slack_blocks, account_name=account_name) + else: + organized_ec2_data = ec2_operations.organize_message_to_seng_mail(ec2_operations.get_resources()) + if os.environ.get("SEND_AGG_MAIL", "no").lower() == "yes": + code, message = process_data.save_to_elastic_search(organized_ec2_data, account_name=account_name) + if os.environ.get('SES_HOST_ADDRESS'): + code, message = process_data.send_email(organized_ec2_data) + elif os.environ.get('SES_HOST_ADDRESS'): + if os.environ.get('SES_HOST_ADDRESS'): + code, message = process_data.send_email(organized_ec2_data) + else: + organized_ec2_data = ec2_operations.get_resources() + message = organized_ec2_data + code = 200 + end_time = time() + return { + 'statusCode': code, + 'body': json.dumps({'message': message, 'extra_message': extra_message}), + 'total_running_time': f"{end_time - start_time} s" + } diff --git a/cloudsensei/requirements.txt b/cloudsensei/requirements.txt new file mode 100644 index 00000000..1b22ecc6 --- /dev/null +++ b/cloudsensei/requirements.txt @@ -0,0 +1,4 @@ +boto3==1.26.1 +requests==2.31.0 +jinja2==3.1.2 +elasticsearch==7.11.0 diff --git a/cloudsensei/run.sh b/cloudsensei/run.sh new file mode 100755 index 00000000..b0b0e1c9 --- /dev/null +++ b/cloudsensei/run.sh @@ -0,0 +1,78 @@ +PROJECT_NAME="CloudSensei" +SUCCESS_OUTPUT_PATH="/dev/null" +ERROR_LOG="$(mktemp -d)/stderr.log" + +source ./env.sh + + +action="$1" + +if [ -d "./terraform/.terraform" ]; then + echo "Deleting the existing .terraform folder" + rm -rf "./terraform/.terraform" +fi + +if [ "$action" = "deploy" ]; then + echo "Clearing if previously created zip file" + PROJECT_PATH="$PWD/$PROJECT_NAME.zip" + if [ -f $PROJECT_PATH ]; then + rm -rf $PROJECT_PATH + rm -rf ./package + echo "Deleted Previously created zip file" + fi + + pip install --upgrade pip + pip install --target ./package -r ./requirements.txt > $SUCCESS_OUTPUT_PATH + pushd ./package + zip -r ../$PROJECT_NAME.zip . 
diff --git a/cloudsensei/run.sh b/cloudsensei/run.sh
new file mode 100755
index 00000000..b0b0e1c9
--- /dev/null
+++ b/cloudsensei/run.sh
@@ -0,0 +1,78 @@
+PROJECT_NAME="CloudSensei"
+SUCCESS_OUTPUT_PATH="/dev/null"
+ERROR_LOG="$(mktemp -d)/stderr.log"
+
+source ./env.sh
+
+
+action="$1"
+
+if [ -d "./terraform/.terraform" ]; then
+  echo "Deleting the existing .terraform folder"
+  rm -rf "./terraform/.terraform"
+fi
+
+if [ "$action" = "deploy" ]; then
+  echo "Clearing any previously created zip file"
+  PROJECT_PATH="$PWD/$PROJECT_NAME.zip"
+  if [ -f "$PROJECT_PATH" ]; then
+    rm -rf "$PROJECT_PATH"
+    rm -rf ./package
+    echo "Deleted previously created zip file"
+  fi
+
+  pip install --upgrade pip
+  pip install --target ./package -r ./requirements.txt > $SUCCESS_OUTPUT_PATH
+  pushd ./package
+  zip -r ../$PROJECT_NAME.zip . > $SUCCESS_OUTPUT_PATH
+  popd
+  zip -g $PROJECT_NAME.zip lambda_function.py > $SUCCESS_OUTPUT_PATH
+  zip -g $PROJECT_NAME.zip email_template.j2 > $SUCCESS_OUTPUT_PATH
+  zip -g $PROJECT_NAME.zip slack_operations.py > $SUCCESS_OUTPUT_PATH
+  zip -g $PROJECT_NAME.zip es_operations.py > $SUCCESS_OUTPUT_PATH
+  zip -g $PROJECT_NAME.zip send_email.py > $SUCCESS_OUTPUT_PATH
+
+  pushd ./terraform
+  echo "#############################"
+  echo "Creating the lambda function using terraform"
+  if [ -n "$ACCOUNT_ID" ]; then
+    echo "Generating jinja files and tfvars file"
+    python ./Template.py
+    echo "Completed generating tfvars file and jinja file"
+    if command -v terraform; then
+      if [ -s "$ERROR_LOG" ]; then
+        rm -f "$ERROR_LOG"
+        echo "Removed the stderr file if present"
+      fi
+      terraform init 1> $SUCCESS_OUTPUT_PATH
+      terraform state pull
+      terraform apply -var-file="./input_vars.tfvars" -auto-approve 2> "$ERROR_LOG"
+      if [[ -s "$ERROR_LOG" ]]; then
+        cat $ERROR_LOG
+        terraform destroy -var-file="./input_vars.tfvars" -auto-approve
+        echo "Validate your credentials / check the output above"
+      else
+        echo "Successfully created the lambda function"
+      fi
+    else
+      echo "Please install terraform on your local machine"
+    fi
+  else
+    echo "AWS ACCOUNT_ID is missing, please export the variable"
+  fi
+  echo "#############################"
+  popd
+else
+  pushd ./terraform
+  if [ "$action" = "destroy" ]; then
+    echo "Generating jinja files and tfvars file"
+    python ./Template.py
+    echo "Completed generating tfvars file and jinja file"
+    terraform init 1> $SUCCESS_OUTPUT_PATH
+    terraform state pull
+    terraform destroy -var-file="./input_vars.tfvars" -auto-approve
+  else
+    echo "Invalid argument passed; supported actions: deploy, destroy"
+  fi
+  popd
+fi
diff --git a/cloudsensei/send_email.py b/cloudsensei/send_email.py
new file mode 100644
index 00000000..3363aa73
--- /dev/null
+++ b/cloudsensei/send_email.py
@@ -0,0 +1,42 @@
+import logging
+import ssl
+import os
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from smtplib import SMTP
+
+
+def send_email_with_ses(to: any, body: str, subject: str, cc: any = None):
+    """
+    This method sends the mail via the SES SMTP interface
+    :param subject:
+    :param to:
+    :param body:
+    :param cc:
+    :return:
+    """
+    host = os.environ.get("SES_HOST_ADDRESS", '')
+    port = int(os.environ.get("SES_HOST_PORT", 0))
+    user = os.environ.get("SES_USER_ID", '')
+    password = os.environ.get("SES_PASSWORD", '')
+    if host and port and user and password:
+        context = ssl.create_default_context()
+        msg = MIMEMultipart('alternative')
+        msg["Subject"] = subject
+        msg["From"] = "noreply@aws.rhperfscale.org"
+        msg["To"] = ", ".join(to) if isinstance(to, list) else to
+        if cc:
+            msg["Cc"] = ",".join(cc) if isinstance(cc, list) else cc
+        msg.attach(MIMEText(body, 'html'))
+        try:
+            with SMTP(host, port) as server:
+                server.starttls(context=context)
+                server.login(user=user, password=password)
+                server.send_message(msg)
+                logging.info(f"Successfully sent mail To: {to}, Cc: {cc}")
+                return True
+        except Exception as err:
+            logging.error(f"Error raised: {err}")
+    else:
+        logging.info("Missing mailing fields, please check that all fields are set")
+    return False
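A usage sketch for the helper above (the endpoint, credentials, and addresses are placeholders; real SES_* values must belong to an SES SMTP identity):

    import os
    from send_email import send_email_with_ses

    os.environ.update({
        'SES_HOST_ADDRESS': 'email-smtp.us-east-1.amazonaws.com',  # assumption: SES SMTP endpoint
        'SES_HOST_PORT': '587',
        'SES_USER_ID': '<smtp-user>',
        'SES_PASSWORD': '<smtp-password>',
    })
    sent = send_email_with_ses(to=['user@example.com'], cc=['lead@example.com'],
                               subject='Daily Cloud Report', body='<h3>No long running instances</h3>')
    print('sent' if sent else 'failed')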
diff --git a/cloudsensei/slack_operations.py b/cloudsensei/slack_operations.py
new file mode 100644
index 00000000..aedceee1
--- /dev/null
+++ b/cloudsensei/slack_operations.py
@@ -0,0 +1,66 @@
+import logging
+import os
+from datetime import datetime
+
+import requests
+
+
+class SlackOperations:
+    """
+    This class performs the Slack operations
+    """
+
+    SLACK_POST_API = 'https://slack.com/api/chat.postMessage'  # API to post messages in slack
+
+    def __init__(self):
+        self.__slack_auth_token = os.environ['SLACK_API_TOKEN']
+        self.__channel_name = f'#{os.environ["SLACK_CHANNEL_NAME"]}'  # add your app to this channel before posting
+        self.api_headers = {
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {self.__slack_auth_token}'
+        }
+
+    def post_message(self, blocks: list, thread_ts: str = None):
+        """
+        This method posts a block message in slack
+        :param thread_ts:
+        :param blocks:
+        :return:
+        """
+        json_data = {
+            'channel': self.__channel_name,
+            'blocks': blocks
+        }
+        if thread_ts:
+            json_data['thread_ts'] = thread_ts
+        response = requests.post(url=self.SLACK_POST_API, headers=self.api_headers, json=json_data)
+        response_data = response.json()
+        return response_data
+
+    def create_thread(self, account_name: str):
+        """
+        This method sends the header first to create a thread in slack
+        :return:
+        """
+        header = f":zap: Daily Report @ {datetime.utcnow().date()}: Account *{account_name.title()}* has the following long running instances"
+        blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": header}}]
+        response_data = self.post_message(blocks=blocks)
+        if response_data:
+            if response_data.get('ok'):
+                logging.info(f"Successfully created a thread @timestamp={response_data.get('ts')}")
+                return response_data.get('ts')
+        return None
+
+    def post_message_blocks_in_thread(self, message_blocks: list, thread_ts: str):
+        """
+        This method posts message blocks in a thread
+        :param message_blocks:
+        :param thread_ts:
+        :return:
+        """
+        success_sends = 0
+        for block in message_blocks:
+            response = self.post_message(blocks=block, thread_ts=thread_ts)
+            if response.get('ok'):
+                success_sends += 1
+        return f"Total blocks: {len(message_blocks)}, Total successful blocks: {success_sends}"
diff --git a/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2 b/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2
new file mode 100644
index 00000000..66fb21b1
--- /dev/null
+++ b/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2
@@ -0,0 +1,29 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ec2:DescribeInstances",
+        "ec2:DescribeRegions",
+        "iam:ListAccountAliases"
+      ],
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "logs:CreateLogGroup",
+      "Resource": "arn:aws:logs:{{AWS_DEFAULT_REGION}}:{{ACCOUNT_ID}}:*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "logs:CreateLogStream",
+        "logs:PutLogEvents"
+      ],
+      "Resource": [
+        "arn:aws:logs:{{AWS_DEFAULT_REGION}}:{{ACCOUNT_ID}}:log-group:/aws/lambda/CloudSensei:*"
+      ]
+    }
+  ]
+}
diff --git a/cloudsensei/terraform/CloudSenseiLambdaRole.json b/cloudsensei/terraform/CloudSenseiLambdaRole.json
new file mode 100644
index 00000000..fd267525
--- /dev/null
+++ b/cloudsensei/terraform/CloudSenseiLambdaRole.json
@@ -0,0 +1,12 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "lambda.amazonaws.com"
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
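The policy template above is rendered by Template.py (the next file); a minimal sketch of that rendering step, assuming jinja2 is installed and the template is read from the current directory (the account id is a placeholder):

    from jinja2 import Template

    with open('CloudSenseiLambdaPolicy.j2') as file:
        policy = Template(file.read()).render({'ACCOUNT_ID': '123456789012',  # placeholder account id
                                               'AWS_DEFAULT_REGION': 'us-east-1'})
    print(policy)  # concrete IAM policy JSON with the log ARNs filled in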
diff --git a/cloudsensei/terraform/Template.py b/cloudsensei/terraform/Template.py
new file mode 100644
index 00000000..f99ced61
--- /dev/null
+++ b/cloudsensei/terraform/Template.py
@@ -0,0 +1,60 @@
+import os
+
+from jinja2 import Template
+
+
+def inject_variables():
+    """
+    This method injects the variables into the jinja files and creates the json and tfvars files
+    :return:
+    """
+    account_id = os.environ.get('ACCOUNT_ID', '')
+    aws_region = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
+    s3_bucket_name = os.environ.get('S3_BUCKET')
+    if account_id:
+        with open('CloudSenseiLambdaPolicy.j2') as file:
+            template_loader = Template(file.read())
+        with open('./CloudSenseiLambdaPolicy.json', 'w') as write_file:
+            write_file.write(template_loader.render({'ACCOUNT_ID': account_id, 'AWS_DEFAULT_REGION': aws_region}))
+    else:
+        print("AccountId is missing")
+    if s3_bucket_name:
+        with open('backend.j2') as backend_file:
+            template_loader = Template(backend_file.read())
+        with open('./backend.tf', 'w') as backend_write_file:
+            backend_write_file.write(template_loader.render({'AWS_DEFAULT_REGION': aws_region,
+                                                             'S3_BUCKET_NAME': s3_bucket_name}))
+    resource_days = os.environ.get('RESOURCE_DAYS', '7')
+    slack_api_token = os.environ.get('SLACK_API_TOKEN', '')
+    slack_channel_name = os.environ.get('SLACK_CHANNEL_NAME', '')
+    ses_host_address = os.environ.get('SES_HOST_ADDRESS', '')
+    ses_host_port = int(os.environ.get('SES_HOST_PORT', '0'))
+    ses_user_id = os.environ.get('SES_USER_ID', '')
+    ses_password = os.environ.get('SES_PASSWORD', '')
+    to_addresses = os.environ.get('TO_ADDRESS', '')
+    cc_addresses = os.environ.get('CC_ADDRESS', '')
+    send_agg_mail = os.environ.get('SEND_AGG_MAIL')
+    es_server = os.environ.get('ES_SERVER')
+    context = f'AWS_DEFAULT_REGION="{aws_region}"'
+    if resource_days:
+        context += f'\nRESOURCE_DAYS="{resource_days}"'
+    if slack_api_token and slack_channel_name:
+        context += f'\nSLACK_API_TOKEN="{slack_api_token}"\nSLACK_CHANNEL_NAME="{slack_channel_name}"'
+    if ses_host_address and ses_host_port and ses_user_id and ses_password:
+        context += f'\nSES_HOST_ADDRESS="{ses_host_address}"' \
+                   f'\nSES_HOST_PORT="{ses_host_port}"' \
+                   f'\nSES_USER_ID="{ses_user_id}"' \
+                   f'\nSES_PASSWORD="{ses_password}"'
+    if to_addresses:
+        context += f'\nTO_ADDRESS="{to_addresses}"'
+    if cc_addresses:
+        context += f'\nCC_ADDRESS="{cc_addresses}"'
+    if send_agg_mail:
+        context += f'\nSEND_AGG_MAIL="{send_agg_mail}"'
+    if es_server:
+        context += f'\nES_SERVER="{es_server}"'
+    with open('./input_vars.tfvars', 'w') as write_tf_vars:
+        write_tf_vars.write(context)
+
+
+inject_variables()
diff --git a/cloudsensei/terraform/backend.j2 b/cloudsensei/terraform/backend.j2
new file mode 100644
index 00000000..b6859603
--- /dev/null
+++ b/cloudsensei/terraform/backend.j2
@@ -0,0 +1,7 @@
+terraform {
+  backend "s3" {
+    bucket = "{{S3_BUCKET_NAME}}"
+    key    = "terraform.tfstate"
+    region = "us-east-2"
+  }
+}
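To see what the generator writes, a hedged example run (executed from cloudsensei/terraform; all exported values are placeholders):

    import os

    os.environ.update({'ACCOUNT_ID': '123456789012', 'S3_BUCKET': 'my-state-bucket',
                       'SLACK_API_TOKEN': 'xoxb-placeholder', 'SLACK_CHANNEL_NAME': 'cloud-reports'})
    import Template  # importing the module runs inject_variables(), rendering backend.tf,
                     # CloudSenseiLambdaPolicy.json and input_vars.tfvars in the current directory
    print(open('input_vars.tfvars').read())
    # AWS_DEFAULT_REGION="us-east-1"
    # RESOURCE_DAYS="7"
    # SLACK_API_TOKEN="xoxb-placeholder"
    # SLACK_CHANNEL_NAME="cloud-reports"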
*)" + schedule_expression_timezone = "Asia/Kolkata" + target { + arn = module.lambda_function_existing_package_local.lambda_function_arn + role_arn = aws_iam_role.event_bridge_role.arn + } + +} + +resource "aws_iam_role" "event_bridge_role" { + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + Service = "scheduler.amazonaws.com" + }, + }, + ] + }) + inline_policy { + name = "CloudSenseiEventBridgeExecutionPolicy" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["lambda:InvokeFunction"] + Effect = "Allow" + Resource = [module.lambda_function_existing_package_local.lambda_function_arn, + "${module.lambda_function_existing_package_local.lambda_function_arn}/*"] + }, + ] + }) + } + + tags = { + User = "cloudsensei" + } + name = "CloudSenseiEvenBrideRole" +} diff --git a/cloudsensei/terraform/lambda.tf b/cloudsensei/terraform/lambda.tf new file mode 100644 index 00000000..76f363e5 --- /dev/null +++ b/cloudsensei/terraform/lambda.tf @@ -0,0 +1,46 @@ +module "lambda_function_existing_package_local" { + source = "terraform-aws-modules/lambda/aws" + lambda_role = aws_iam_role.cloud_sensei_iam_role.arn + function_name = "CloudSensei" + description = "Daily reporting on Cloud Usage" + memory_size = 256 + package_type = "Zip" + tags = { + User = "cloudsensei" + } + timeout = 300 + environment_variables = { + SLACK_API_TOKEN = var.SLACK_API_TOKEN + SLACK_CHANNEL_NAME = var.SLACK_CHANNEL_NAME + RESOURCE_DAYS = var.RESOURCE_DAYS + SES_HOST_ADDRESS = var.SES_HOST_ADDRESS + SES_HOST_PORT = var.SES_HOST_PORT + SES_USER_ID = var.SES_USER_ID + SES_PASSWORD = var.SES_PASSWORD + TO_ADDRESS = var.TO_ADDRESS + CC_ADDRESS = var.CC_ADDRESS + ES_SERVER = var.ES_SERVER + SEND_AGG_MAIL = var.SEND_AGG_MAIL + } + runtime = "python3.9" + local_existing_package = "./../CloudSensei.zip" + handler = "lambda_function.lambda_handler" + create_package = false + create_role = false +} + +# Create Lambda Role Execution policy, with specified resource permissions +resource "aws_iam_role" "cloud_sensei_iam_role" { + + name = "CloudSenseiLambdaRole" + + assume_role_policy = file("./CloudSenseiLambdaRole.json") + inline_policy { + name = "CloudSenseiLambdaPolicy" + policy = file("./CloudSenseiLambdaPolicy.json") + } + tags = { + User = "cloudsensei" + } + +} diff --git a/cloudsensei/terraform/main.tf b/cloudsensei/terraform/main.tf new file mode 100644 index 00000000..45fb3f58 --- /dev/null +++ b/cloudsensei/terraform/main.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = var.AWS_DEFAULT_REGION +} diff --git a/cloudsensei/terraform/variables.tf b/cloudsensei/terraform/variables.tf new file mode 100644 index 00000000..72a592ec --- /dev/null +++ b/cloudsensei/terraform/variables.tf @@ -0,0 +1,61 @@ +variable "SLACK_API_TOKEN" { + type = string + description = "Slack OAuth Token" + default = null +} + +variable "SLACK_CHANNEL_NAME" { + type = string + description = "Slack Channel id/name" + default = null +} + +variable "AWS_DEFAULT_REGION" { + default = "us-east-1" +} + +variable "ACCOUNT_ID" { + default = null +} + +variable "RESOURCE_DAYS" { + type = number + default = 7 +} + +variable "SES_HOST_ADDRESS" { + type = string + default = null +} + +variable "SES_HOST_PORT" { + type = number + default = null +} +variable "SES_USER_ID" { + type = string + default = null +} +variable "SES_PASSWORD" { + type = string + default = null +} + +variable "TO_ADDRESS" { + type = string + default = null +} 
+variable "CC_ADDRESS" { + type = string + default = null +} + +variable "SEND_AGG_MAIL" { + type = string + default = "no" +} + +variable "ES_SERVER" { + type = string + default = null +} diff --git a/docs/source/index.md b/docs/source/index.md index 09906c4e..57f9beba 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -23,7 +23,7 @@ This tool support the following policies: * [s3_inactive](../../cloud_governance/policy/aws/s3_inactive.py): Get the inactive/empty buckets and delete them after 7 days. * [empty_roles](../../cloud_governance/policy/aws/empty_roles.py): Get empty roles and delete it after 7 days. * [zombie_snapshots](../../cloud_governance/policy/aws/zombie_snapshots.py): Get the zombie snapshots and delete it after 7 days. -* [nat_gateway_unused](../../cloud_governance/policy/aws/nat_gateway_unused.py): Get the unused nat gateways and deletes it after 7 days. +* [nat_gateway_unused](../../cloud_governance/policy/aws/unused_nat_gateway.py): Get the unused nat gateways and deletes it after 7 days. * gitleaks: scan Github repository git leak (security scan) * [cost_over_usage](../../cloud_governance/policy/aws/cost_over_usage.py): send mail to aws user if over usage cost diff --git a/grafana/clouds/aws/cost_explorer_main.json b/grafana/clouds/aws/cost_explorer_main.json new file mode 100644 index 00000000..4b72fc23 --- /dev/null +++ b/grafana/clouds/aws/cost_explorer_main.json @@ -0,0 +1,4673 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 124, + "iteration": 1683606984477, + "links": [ + { + "asDropdown": false, + "icon": "dashboard", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Clouds Payer Dashboards", + "tooltip": "", + "type": "link", + "url": "http://grafana.intlab.perf-infra.lab.eng.rdu2.redhat.com/d/ckeZn1o4k/payer-account-billing-reports?orgId=7&var-CloudName=All&var-Owner=Shai&var-CostCenter=All&var-Account=All&var-AccountId=All" + }, + { + "asDropdown": false, + "icon": "dashboard", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "IBM Dashboards", + "tooltip": "", + "type": "link", + "url": "http://grafana.intlab.perf-infra.lab.eng.rdu2.redhat.com/d/dvtz2vHVz/ibm-monthly-invoice-dashboard?orgId=7" + }, + { + "asDropdown": false, + "icon": "external link", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "GSheet Links", + "tooltip": "", + "type": "link", + "url": "https://docs.google.com/spreadsheets/d/1EHFGVgjMc9Usl-0QFSKl5UmtMeqAiodQDlBSL9qx2XY/edit#gid=0" + } + ], + "liveNow": false, + "panels": [ + { + "description": "", + "gridPos": { + "h": 8, + "w": 5, + "x": 7, + "y": 0 + }, + "id": 2, + "libraryPanel": { + "description": "", + "meta": { + "connectedDashboards": 5, + "created": "2022-06-17T12:14:14Z", + "createdBy": { + "avatarUrl": "/avatar/094e42d44756239ce2006664467f047b", + "id": 86, + "name": "athiruma" + }, + "folderName": "General", + "folderUid": "", + "updated": "2022-11-23T06:58:09Z", + "updatedBy": { + "avatarUrl": "/avatar/4925d4e6629bfce243ae0033f77c3aa2", + "id": 85, + "name": "ebattat" + } + }, + "name": "Cloud 
Governance Nightly Report", + "type": "text", + "uid": "E56aJXj7z", + "version": 5 + }, + "options": { + "content": "\n![Cloud Governance](https://github.com/redhat-performance/cloud-governance/blob/main/images/cloud_governance.png?raw=true \"Tooltip Text\")\n", + "mode": "markdown" + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "timestamp", + "id": "2", + "settings": { + "interval": "auto" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "I9yrJ19nz" + }, + "metrics": [ + { + "id": "1", + "type": "count" + } + ], + "query": "", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cloud Governance Nightly Cost Report", + "type": "text" + }, + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 121, + "options": { + "content": "![AWS](https://pbs.twimg.com/profile_images/1402754057245138947/Yz4xMoJC_400x400.jpg \"Tooltip Text\")\n", + "mode": "markdown" + }, + "pluginVersion": "8.5.14", + "type": "text" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 28, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "7VSc2zpVz" + }, + "description": "Show last 2 days for showing final cost", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "blue", + "mode": "fixed" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 26, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Budget.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "7VSc2zpVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Account: $Account", + "transformations": [ + { + "id": "organize", + "options": {} + } + ], + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "7VSc2zpVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": false, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 6, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.3.3", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Budget.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "7VSc2zpVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "type": "timeseries" + } + ], + "title": "Cost Per Account", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 128, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 130, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "vertical", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 200 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "ChargeType.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account Charge Type", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Sum" + } + ] + } + } + ], + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "currencyUSD" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 131, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Sum$/", + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "ChargeType.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account Charge Type", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Sum" + } + ] + } + } + ], + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 132, + "options": { + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "ChargeType.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "LdTmDL2Vk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account Charge Type", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Sum" + } + ] + } + } + ], + "type": "timeseries" + } + ], + "title": "Account Charge Type", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 137, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "X26Kz1xVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 134, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "list", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "PurchaseType.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "X26Kz1xVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget.keyword= $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account Purchase Types", + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "X26Kz1xVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 25, + "gradientMode": "opacity", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 135, + "options": { + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "PurchaseType.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "X26Kz1xVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget.keyword= $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account Purchase Types", + "type": "timeseries" + } + ], + "title": "PurchaseTypes", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 12, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "yellow", + "mode": 
"palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 31, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "values": [ + "percent", + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "limit": 100, + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Project.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget: $Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Project: $Account", + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "yellow", + "mode": "fixed" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 42 + }, + "id": 14, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": -45, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.3.3", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Project.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "transformations": [], + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": false, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 6, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { 
+ "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 39, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.3.3", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Project.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "UNThhktVk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "type": "timeseries" + } + ], + "title": "Cost Per Project", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 18, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 29, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "values": [ + "percent" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Manager.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Manager: $Account", + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 20, + "options": { + "barRadius": 0, 
+ "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": -45, + "xTickLabelSpacing": 0 + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Manager.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget=$Account ", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": false, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 6, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 41, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.3.3", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Manager.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "GqAphkp4z" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "max" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "type": "timeseries" + } + ], + "title": "Cost Per Manager", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 6, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "orange", + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "min": 1, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 30, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + 
"placement": "right", + "values": [ + "percent", + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "limit": 100, + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "User.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Users $Account", + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "orange", + "mode": "fixed" + }, + "custom": { + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 8, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": -45, + "xTickLabelSpacing": 0 + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "User.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": false, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 6, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 46, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.3.3", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "User.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "missing": "User.keyword", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "timeZone": "utc", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "XdKgWRtVz" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "settings": { + "missing": "0" + }, + "type": "max" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "$Account", + "type": "timeseries" + } + ], + "title": "Cost Per Users", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 113, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "PQlAtADVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 17, + "x": 0, + "y": 15 + }, + "id": 115, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [], + "datasource": { + "type": "elasticsearch", + "uid": "PQlAtADVz" + }, + "metrics": [ + { + "id": "1", + "settings": { + "size": "500" + }, + "type": "raw_data" + } + ], + "query": "!\"notify_admin\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Monthly report", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": false, + "field": "Account.keyword" + } + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Account", + "MessageType", + "Policy", + "To", + "timestamp" + ] + } + } + }, + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "string", + "targetField": "MessageType" + } + ], + "fields": {} + } + }, + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "equal", + "options": { + "value": "null" + } + }, + "fieldName": "MessageType" + }, + { + "config": { + "id": "equal", + "options": { + "value": "undefined" + } + }, + "fieldName": "MessageType" + } + ], + "match": "any", + "type": "exclude" + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "Account": "", + "MessageType": "", + "To": "User", + "timestamp": "Alert Dat" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "PQlAtADVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + 
"overrides": [] + }, + "gridPos": { + "h": 9, + "w": 17, + "x": 0, + "y": 24 + }, + "id": 138, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Policy.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "MessageType.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "PQlAtADVz" + }, + "metrics": [ + { + "id": "1", + "type": "count" + } + ], + "query": "", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Monthly report", + "transformations": [], + "type": "table" + } + ], + "title": "Mail Alerts", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 90, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 96, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [], + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "metrics": [ + { + "id": "1", + "settings": { + "size": "500" + }, + "type": "raw_data" + } + ], + "query": "Account=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Skip resources in $Account", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Region", + "ResourceId", + "ResourceName", + "User" + ] + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": false, + "field": "User" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 7, + "x": 0, + "y": 24 + }, + "id": 94, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": false + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Account.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Account=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost of skip resources in $Account", + "type": 
"gauge" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 7, + "x": 7, + "y": 24 + }, + "id": 98, + "options": { + "displayLabels": [ + "name", + "value" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Count$/", + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "User.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "metrics": [ + { + "id": "1", + "type": "count" + } + ], + "query": "Account=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Skip resources by User in $Account", + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 14, + "y": 24 + }, + "id": 92, + "options": { + "displayLabels": [ + "name", + "value" + ], + "legend": { + "displayMode": "list", + "placement": "right" + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Count$/", + "values": true + }, + "tooltip": { + "mode": "multi", + "sort": "asc" + } + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "ResourceName.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "KwQ6eOD4z" + }, + "metrics": [ + { + "id": "1", + "type": "count" + } + ], + "query": "Account.keyword=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Skip policy Resources in $Account", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Cost" + } + ] + } + } + ], + "type": "piechart" + } + ], + "title": "Skip Policy Resources", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 61, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 57, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": true + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "region.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + 
"orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "max" + } + ], + "query": "policy.keyword:\"ec2_stop\" AND account.keyword:$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "EC2_Stop: $Account: ", + "type": "gauge" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "region.keyword" + }, + "properties": [ + { + "id": "custom.width", + "value": 273 + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 53 + }, + "id": 59, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "ec2_stop.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "region.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "sum" + } + ], + "query": "policy.keyword:\"ec2_stop\" AND account.keyword:$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "EC2_Stop: $Account: ", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Sum": true + }, + "indexByName": { + "Sum": 2, + "ec2_stop.keyword": 1, + "region.keyword": 0 + }, + "renameByName": { + "ec2_stop.keyword": "InstanceId | Name | User | LaunchTime | Policy ( Not_Delete )" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "region.keyword" + } + ] + } + } + ], + "type": "table" + } + ], + "title": "Policy: EC2 Stop >= 30 days", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 48, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green" + }, + { + "color": "orange", + "value": 70 + }, + { + "color": "red", + "value": 85 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 50, + "interval": "1d", + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": true + }, + "showThresholdLabels": false, + "showThresholdMarkers": false + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "region.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "hide": false, + "metrics": [ + { + 
"field": "resources", + "id": "1", + "type": "sum" + } + ], + "query": "policy.keyword:\"ec2_idle\" AND account.keyword:$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "EC2 Idle: $Account", + "type": "gauge" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "fieldConfig": { + "defaults": { + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "resources_list.keyword" + }, + "properties": [ + { + "id": "custom.width", + "value": 1056 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "region.keyword" + }, + "properties": [ + { + "id": "custom.width", + "value": 134 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "timestamp" + }, + "properties": [ + { + "id": "custom.width", + "value": 131 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 51, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "resources_list.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + }, + { + "field": "region.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "VRMNYTR4k" + }, + "hide": false, + "metrics": [ + { + "field": "resources", + "id": "1", + "type": "sum" + } + ], + "query": "account.keyword:$Account AND policy.keyword:\"ec2_idle\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "EC2 Idle: : $Account", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Sum": true + }, + "indexByName": { + "Sum": 2, + "region.keyword": 0, + "resources_list.keyword": 1 + }, + "renameByName": { + "region.keyword": "Region", + "resources_list.keyword": " instance id |user | cost($) | state | instance type | launch time | name | cluster owned" + } + } + } + ], + "type": "table" + } + ], + "title": "Policy: EC2-Idle >= 2 days", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 82, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "2B-r4LTVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 88, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "multi", + "sort": 
"asc" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "instance_type.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "2B-r4LTVz" + }, + "metrics": [ + { + "field": "instance_count", + "id": "1", + "type": "sum" + } + ], + "query": "account.keyword: $Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "InstancesTypes: $Account", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Sum" + } + ] + } + } + ], + "type": "barchart" + } + ], + "title": "InstanceTypes", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 78, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "X3gKVsk4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "BucketName | CreateDate | Age | Policy" + }, + "properties": [ + { + "id": "custom.width", + "value": 603 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Account" + }, + "properties": [ + { + "id": "custom.width", + "value": 103 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Count" + }, + "properties": [ + { + "id": "custom.width", + "value": 181 + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 65, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "empty_buckets.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "account.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "X3gKVsk4z" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "min" + } + ], + "query": "account.keyword:$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Empty Buckets: $Account", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Count": true + }, + "indexByName": {}, + "renameByName": { + "account.keyword": "Account", + "empty_buckets.keyword": "BucketName | CreateDate | Age | Policy" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Account" + } + ] + } + } + ], + "type": "table" + } + ], + "title": "Policy: Empty Bucket", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 69, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 73, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "zombie_snapshots.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "account.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "region.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "max" + } + ], + "query": "account.keyword=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Zombie Snapshots", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Max": true + }, + "indexByName": {}, + "renameByName": { + "account.keyword": "Account", + "region.keyword": "Region", + "zombie_snapshots.keyword": "SnapshotId| Name | User | VolumeSize | Policy" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Account" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "NatGatewayId | user | VpcId | Policy" + }, + "properties": [ + { + "id": "custom.width", + "value": 585 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 71, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "zombie_nat_gateways.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "region.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "account.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "sum" + } + ], + "query": "account.keyword=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Zombie NatGateways: $Account", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Max": true + }, + "indexByName": {}, + "renameByName": { + "account.keyword": "Account", + "region.keyword": "Region", + 
"zombie_nat_gateways.keyword": "NatGatewayId | user | VpcId | Policy" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Account" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "zombie_elastic_ips.keyword" + }, + "properties": [ + { + "id": "custom.width", + "value": 927 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "account.keyword" + }, + "properties": [ + { + "id": "custom.width", + "value": 207 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "AllocationId | Name | PublicIp | Policy" + }, + "properties": [ + { + "id": "custom.width", + "value": 567 + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 67, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "zombie_elastic_ips.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "account.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "region.keyword", + "id": "4", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "ZL9Ev7k4k" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "max" + } + ], + "query": "account.keyword=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Zombie ElasticIps", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "account.keyword" + } + ] + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Max": true + }, + "indexByName": {}, + "renameByName": { + "account.keyword": "Account", + "region.keyword": "Region", + "zombie_elastic_ips.keyword": "AllocationId | Name | PublicIp | Policy" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "X3gKVsk4z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 63, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.9", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "empty_roles.keyword", + "id": "3", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "account.keyword", + "id": "4", + "settings": { + 
"min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "X3gKVsk4z" + }, + "metrics": [ + { + "field": "count", + "id": "1", + "type": "max" + } + ], + "query": "account.keyword=$Account", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Empty Roles", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Max": true + }, + "indexByName": {}, + "renameByName": { + "account.keyword": "Account", + "empty_roles.keyword": "RoleName | Policy" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Account" + } + ] + } + } + ], + "type": "table" + } + ], + "title": "Policy: Zombie & Empty Resources", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 24, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "D1FohzpVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 135 + }, + "id": 22, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": true + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Email.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "D1FohzpVk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Email: $Account", + "type": "table" + } + ], + "title": "Email", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 34, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "eszb2ktVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 35, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Name.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "1", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "eszb2ktVk" + }, + "metrics": [ + { + "field": "Cost", + "id": "1", + "type": "sum" + } + ], + "query": "Budget.keyword: $Account AND !NoTagKey AND 
!\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Cost Per Resource Names: $Account", + "type": "table" + } + ], + "title": "Resource Names", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 45, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "gSgjheqnz" + }, + "description": "Spreadsheet tags", + "fieldConfig": { + "defaults": { + "custom": { + "align": "center", + "displayMode": "color-text", + "filterable": false, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "MissingTags" + }, + "properties": [ + { + "id": "custom.width", + "value": 552 + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 43, + "links": [ + { + "title": "User tags", + "url": "https://docs.google.com/spreadsheets/d/1KEFd1e1z03c9Ai7LyX7IoBtLGTiKhSKEzLMuwUTAQUY/edit#gid=0" + } + ], + "options": { + "footer": { + "fields": "", + "reducer": [ + "allValues" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [], + "datasource": { + "type": "elasticsearch", + "uid": "gSgjheqnz" + }, + "metrics": [ + { + "id": "1", + "settings": { + "size": "500" + }, + "type": "raw_data" + } + ], + "query": "", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Pef-Dept: missing tags ", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "_id": true, + "_index": true, + "_type": true, + "highlight": true, + "sort": true, + "timestamp": true + }, + "indexByName": {}, + "renameByName": {} + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "MissingTags": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + }, + "User": { + "aggregations": [], + "operation": "groupby" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "cn0DL467z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 53, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [], + "datasource": { + "type": "elasticsearch", + "uid": "cn0DL467z" + }, + "metrics": [ + { + "id": "1", + "settings": { + "size": "500" + }, + "type": "raw_data" + } + ], + "query": "", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "PSAP: missing tags ", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "MissingTags", + "User" + ] + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "MissingTags": 1, + "User": 0 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "elasticsearch", 
+ "uid": "pStF2V67k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "color-text", + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 55, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [], + "datasource": { + "type": "elasticsearch", + "uid": "pStF2V67k" + }, + "metrics": [ + { + "id": "1", + "settings": { + "size": "500" + }, + "type": "raw_data" + } + ], + "query": "", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Perf-Scale: missing tags ", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "MissingTags", + "User" + ] + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "MissingTags": 1, + "User": 0 + }, + "renameByName": {} + } + } + ], + "type": "table" + } + ], + "title": "User: missing tags", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": "PERF-DEPT, PSAP, PERFSCALE", + "current": { + "selected": true, + "text": [ + "PERF-DEPT" + ], + "value": [ + "PERF-DEPT" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "7VSc2zpVz" + }, + "definition": "{\"find\":\"terms\",\"field\":\"Budget.keyword\"}", + "description": "Account", + "hide": 0, + "includeAll": true, + "label": "Account", + "multi": true, + "name": "Account", + "options": [], + "query": "{\"find\":\"terms\",\"field\":\"Budget.keyword\"}", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-7d", + "to": "now" + }, + "timepicker": {}, + "timezone": "utc", + "title": "Cost Explorer-Main", + "uid": "3vqJes5Vk", + "version": 60, + "weekStart": "" +} \ No newline at end of file diff --git a/grafana/clouds/aws/payer_account_billing_reports.json b/grafana/clouds/aws/payer_account_billing_reports.json new file mode 100644 index 00000000..2e121d39 --- /dev/null +++ b/grafana/clouds/aws/payer_account_billing_reports.json @@ -0,0 +1,1228 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Forecasting for the Perf-Dept, Openshift-PerfScale, OPenshift-PSAP", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 130, + "iteration": 1683607032287, + "links": [], + "liveNow": true, + "panels": [ + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "noValue": "0", + "unit": "currencyUSD" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Balance" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#b157ff", + 
"mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 38, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "multi", + "sort": "asc" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Account.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "Actual", + "id": "3", + "settings": { + "min_doc_count": "1", + "missing": "0", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "SavingsPlanCost", + "id": "4", + "settings": { + "min_doc_count": "1", + "missing": "0", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + }, + { + "field": "PremiumSupportFee", + "id": "5", + "settings": { + "min_doc_count": "1", + "missing": "0", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "Budget", + "id": "1", + "type": "sum" + } + ], + "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName AND filter_date.keyword: $Month)", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Total $CloudName/ $Month", + "transformations": [ + { + "id": "calculateField", + "options": { + "alias": "Balance", + "binary": { + "left": "Sum", + "operator": "-", + "reducer": "sum", + "right": "Actual" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + }, + "replaceFields": false + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Account.keyword": false, + "Actual": true, + "CurrentCost": true, + "PremiumSupportFee": false, + "Sum": false + }, + "indexByName": {}, + "renameByName": { + "Balance": "", + "Remaining Cost": "", + "Sum": "Budget" + } + } + }, + { + "id": "calculateField", + "options": { + "alias": "Actual", + "binary": { + "left": "Budget", + "operator": "-", + "reducer": "sum", + "right": "Balance" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + }, + "replaceFields": false + } + }, + { + "id": "calculateField", + "options": { + "alias": "Total", + "mode": "reduceRow", + "reduce": { + "include": [ + "SavingsPlanCost", + "PremiumSupportFee", + "Actual" + ], + "reducer": "sum" + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Actual": true, + "PremiumSupportFee": true, + "SavingsPlanCost": true + }, + "indexByName": {}, + "renameByName": {} + } + } + ], + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "currencyUSD" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 40, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + 
"targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "CloudName.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "missing": "0", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "Actual", + "id": "1", + "settings": { + "missing": "0" + }, + "type": "sum" + }, + { + "field": "SavingsPlanCost", + "id": "3", + "settings": { + "missing": "0" + }, + "type": "sum" + }, + { + "field": "PremiumSupportFee", + "id": "4", + "settings": { + "missing": "0" + }, + "type": "sum" + } + ], + "query": "CloudName.keyword: $CloudName AND Owner.keyword: $Owner AND CostCenter: $CostCenter AND Account.keyword: $Account AND filter_date.keyword: $Month", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Actual $CloudName: Cost / $Month", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "CloudName.keyword": "CloudName", + "Sum Actual": "Actual", + "Sum PremiumSupportFee": "Support", + "Sum SavingsPlanCost": "Savings" + } + } + }, + { + "id": "calculateField", + "options": { + "alias": "Total", + "mode": "reduceRow", + "reduce": { + "include": [ + "Actual", + "Savings", + "Support" + ], + "reducer": "sum" + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Actual": true, + "Savings": true, + "Support": true + }, + "indexByName": {}, + "renameByName": {} + } + } + ], + "type": "piechart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Month.keyword" + }, + "properties": [ + { + "id": "custom.axisWidth", + "value": 2 + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 41, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "auto", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelMaxLength": 0, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "filter_date.keyword", + "id": "11", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "Actual", + "id": "1", + "settings": { + "missing": "0" + }, + "type": "sum" + }, + { + "field": "Budget", + "id": "8", + "settings": { + "missing": "0" + }, + "type": "sum" + }, + { + "field": "Forecast", + "id": "9", + "settings": { + "missing": "0" + }, + "type": "sum" + }, + { + "field": "PremiumSupportFee", + "id": "12", + "settings": { + "missing": "0" 
+ }, + "type": "sum" + }, + { + "field": "SavingsPlanCost", + "id": "13", + "settings": { + "missing": "0" + }, + "type": "sum" + } + ], + "query": "Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName AND filter_date.keyword: $Month", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": " $Account Forecast, Budget, Actual / $Month", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Budget": false, + "TotalValues": true + }, + "indexByName": { + "Account.keyword": 0, + "Budget": 3, + "ForecastCost": 2, + "Month.keyword": 1, + "Sum": 4 + }, + "renameByName": { + "Account.keyword": "Account", + "Budget": "", + "ForecastCost": "Estimated Cost", + "Month.keyword": "Month", + "Sum": "CurrentCost", + "Sum Actual": "Usage", + "Sum Budget": "Budget", + "Sum CurrentCost": "Actual", + "Sum Forecast": "Forecast", + "Sum ForecastCost": "ForeCast", + "Sum PremiumSupportFee": "PremiumSupportFee", + "Sum SavingsPlanCost": "SavingsPlanCost", + "filter_date.keyword": "Month" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": false, + "field": "Month" + } + ] + } + } + ], + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Sum" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 29, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "vertical", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "multi", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Account.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "Actual", + "id": "1", + "settings": { + "missing": "0" + }, + "type": "sum" + } + ], + "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName AND filter_date.keyword: $Month)", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "Current Usage / $Account - Till Now \\ $Month", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Sum" + } + ] + } + } + ], + "transparent": true, + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": 
"auto", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 35, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "vertical", + "showValue": "always", + "stacking": "none", + "text": {}, + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Account.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "AllocatedBudget", + "id": "1", + "type": "max" + } + ], + "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName)", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "CY23 Budget / $Account", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Max" + } + ] + } + } + ], + "type": "barchart" + }, + { + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "description": "IBM has only 1 current Month Forecast", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "left", + "axisSoftMin": 0, + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "currencyUSD" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Sum" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 42 + }, + "id": 32, + "options": { + "barRadius": 0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [ + "sum" + ], + "displayMode": "table", + "placement": "right" + }, + "orientation": "vertical", + "showValue": "always", + "stacking": "none", + "tooltip": { + "mode": "multi", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 + }, + "pluginVersion": "8.5.14", + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "Account.keyword", + "id": "2", + "settings": { + "min_doc_count": "1", + "order": "desc", + "orderBy": "_term", + "size": "0" + }, + "type": "terms" + } + ], + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "metrics": [ + { + "field": "Forecast", + "id": "1", + "type": "sum" + } + ], + "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName)", + "refId": "A", + "timeField": "timestamp" + } + ], + "title": "ForeCasted next 12 M / 
$Account", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "Account.keyword": "Account", + "Sum": "Sum" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Sum" + } + ] + } + } + ], + "type": "barchart" + } + ], + "refresh": "", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": [ + "Shai" + ], + "value": [ + "Shai" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"Owner.keyword\"}", + "hide": 0, + "includeAll": true, + "label": "Owner", + "multi": true, + "name": "Owner", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"Owner.keyword\"}", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": false, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"CostCenter\", \"query\": \"Owner.keyword: $Owner\"}", + "description": "CostCategory", + "hide": 0, + "includeAll": true, + "label": "CostCenter", + "multi": true, + "name": "CostCenter", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"CostCenter\", \"query\": \"Owner.keyword: $Owner\"}", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": false, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"CloudName.keyword\", \"query\": \"CostCenter: $CostCenter AND Owner.keyword: $Owner\"}", + "description": "CloudName", + "hide": 0, + "includeAll": true, + "label": "CloudName", + "multi": true, + "name": "CloudName", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"CloudName.keyword\", \"query\": \"CostCenter: $CostCenter AND Owner.keyword: $Owner\"}", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": "", + "current": { + "selected": false, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"Account.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND !(AccountId.keyword=\\\"\\\") \" }", + "description": "Account", + "hide": 0, + "includeAll": true, + "label": "Account", + "multi": true, + "name": "Account", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"Account.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND !(AccountId.keyword=\\\"\\\") \" }", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": "", + "current": { + "selected": false, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"AccountId.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND Account.keyword: $Account\" }", + "description": "AccountId", + "hide": 0, + "includeAll": true, + "label": "AccountId", + "multi": true, 
+ "name": "AccountId", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"AccountId.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND Account.keyword: $Account\" }", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": "", + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "elasticsearch", + "uid": "NvnUAH04z" + }, + "definition": "{\"find\":\"terms\", \"field\":\"filter_date.keyword\", \"query\": \"Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName\"}", + "description": "Month", + "hide": 0, + "includeAll": true, + "label": "Month", + "multi": true, + "name": "Month", + "options": [], + "query": "{\"find\":\"terms\", \"field\":\"filter_date.keyword\", \"query\": \"Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName\"}", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now/y", + "to": "now/y" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "utc", + "title": "Payer Account Billing Reports", + "uid": "ckeZn1o4k", + "version": 59, + "weekStart": "" +} \ No newline at end of file diff --git a/iam/clouds/aws/CloudGovernanceDeletePolicy.json b/iam/clouds/aws/CloudGovernanceDeletePolicy.json new file mode 100644 index 00000000..7bca5146 --- /dev/null +++ b/iam/clouds/aws/CloudGovernanceDeletePolicy.json @@ -0,0 +1,169 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CostExplorer", + "Effect": "Allow", + "Action": [ + "ce:GetCostAndUsage", + "ce:GetCostForecast" + ], + "Resource": "*" + }, + { + "Sid": "EC2AccountLevel", + "Effect": "Allow", + "Action": [ + "ec2:DeleteTags", + "ec2:CreateTags" + ], + "Resource": [ + "arn:aws:ec2:*:account_id:instance/*", + "arn:aws:ec2:*:account_id:route-table/*", + "arn:aws:ec2:*:account_id:network-interface/*", + "arn:aws:ec2:*:account_id:internet-gateway/*", + "arn:aws:ec2:*:account_id:dhcp-options/*", + "arn:aws:ec2:*::snapshot/*", + "arn:aws:ec2:*:account_id:vpc/*", + "arn:aws:ec2:*:account_id:elastic-ip/*", + "arn:aws:ec2:*:account_id:network-acl/*", + "arn:aws:ec2:*:account_id:natgateway/*", + "arn:aws:ec2:*:account_id:security-group/*", + "arn:aws:ec2:*:account_id:vpc-endpoint/*", + "arn:aws:ec2:*:account_id:subnet/*", + "arn:aws:ec2:*:account_id:volume/*", + "arn:aws:ec2:*::image/*" + ] + }, + { + "Sid": "EC2ResourceLevel", + "Effect": "Allow", + "Action": [ + "ec2:DeregisterImage", + "ec2:DeleteSubnet", + "ec2:DeleteSnapshot", + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpcPeeringConnection", + "autoscaling:DescribeLaunchConfigurations", + "ec2:DescribeRegions", + "ec2:CreateImage", + "ec2:CreateVpc", + "ec2:DescribeDhcpOptions", + "ec2:DescribeSnapshots", + "ec2:DeleteRouteTable", + "ec2:DescribeInternetGateways", + "ec2:DeleteVolume", + "ec2:DescribeNetworkInterfaces", + "autoscaling:DescribeAutoScalingGroups", + "ec2:DescribeVolumes", + "ec2:DeleteInternetGateway", + "ec2:DescribeNetworkAcls", + "ec2:DescribeRouteTables", + "ec2:DeleteNetworkAcl", + "ec2:ReleaseAddress", + "ec2:AssociateDhcpOptions", + "ec2:TerminateInstances", + "ec2:DetachNetworkInterface", + "ec2:DescribeTags", + "ec2:DescribeVpcPeeringConnections", + "ec2:ModifyNetworkInterfaceAttribute", 
+ "ec2:DeleteNetworkInterface", + "ec2:DetachInternetGateway", + "ec2:DescribeNatGateways", + "cloudwatch:GetMetricStatistics", + "ec2:StopInstances", + "ec2:DisassociateRouteTable", + "ec2:DescribeSecurityGroups", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeImages", + "ec2:DescribeVpcs", + "ec2:DeleteSecurityGroup", + "ec2:DescribeInstanceTypes", + "ec2:DeleteDhcpOptions", + "ec2:DeleteNatGateway", + "ec2:DescribeVpcEndpoints", + "ec2:DeleteVpc", + "ec2:DescribeSubnets" + ], + "Resource": "*" + }, + { + "Sid": "LoadBalancer", + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:DescribeLoadBalancers" + ], + "Resource": "*" + }, + { + "Sid": "IAM", + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:DeleteAccessKey", + "iam:DeleteGroup", + "iam:TagRole", + "iam:DeleteUserPolicy", + "iam:ListRoles", + "iam:DeleteUser", + "iam:ListUserPolicies", + "iam:CreateUser", + "iam:TagUser", + "sts:AssumeRole", + "iam:RemoveUserFromGroup", + "iam:GetUserPolicy", + "iam:ListAttachedRolePolicies", + "iam:ListUsers", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListRolePolicies", + "iam:ListAccountAliases" + ], + "Resource": "*" + }, + { + "Sid": "Pricing", + "Effect": "Allow", + "Action": "pricing:GetProducts", + "Resource": "*" + }, + { + "Sid": "S3Bucket", + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:CreateBucket", + "s3:ListBucket", + "s3:PutObjectTagging", + "s3:DeleteObject", + "s3:DeleteBucket", + "s3:putBucketTagging", + "s3:GetBucketTagging", + "s3:GetBucketLocation" + ], + "Resource": "*" + }, + { + "Sid": "CloudTrail", + "Effect": "Allow", + "Action": [ + "cloudtrail:LookupEvents", + "cloudtrail:ListTrails" + ], + "Resource": "*" + }, + { + "Sid": "CloudWatch", + "Effect": "Allow", + "Action": "cloudwatch:GetMetricData", + "Resource": "*" + } + ] +} diff --git a/iam/clouds/aws/delete/CloudGovernanceEC2Policy b/iam/clouds/aws/delete/CloudGovernanceEC2Policy index 5d6f676a..b7310a80 100644 --- a/iam/clouds/aws/delete/CloudGovernanceEC2Policy +++ b/iam/clouds/aws/delete/CloudGovernanceEC2Policy @@ -30,42 +30,53 @@ "Sid": "VisualEditor1", "Effect": "Allow", "Action": [ + "ec2:DeregisterImage", + "ec2:DeleteSubnet", + "ec2:DeleteSnapshot", "ec2:DescribeAddresses", "ec2:DescribeInstances", - "ec2:DescribeTags", + "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpcPeeringConnection", + "autoscaling:DescribeLaunchConfigurations", "ec2:DescribeRegions", + "ec2:CreateImage", + "ec2:CreateVpc", "ec2:DescribeDhcpOptions", - "ec2:DescribeNatGateways", - "cloudwatch:GetMetricStatistics", "ec2:DescribeSnapshots", - "ec2:DescribeSecurityGroups", - "ec2:DescribeImages", + "ec2:DeleteRouteTable", "ec2:DescribeInternetGateways", + "ec2:DeleteVolume", "ec2:DescribeNetworkInterfaces", - "ec2:DescribeVpcs", + "autoscaling:DescribeAutoScalingGroups", "ec2:DescribeVolumes", - "ec2:DescribeVpcEndpoints", - "ec2:DescribeSubnets", + "ec2:DeleteInternetGateway", "ec2:DescribeNetworkAcls", "ec2:DescribeRouteTables", - "ec2:DeleteNatGateway", - "ec2:DetachInternetGateway", - "ec2:DeleteInternetGateway", + "ec2:DeleteNetworkAcl", + "ec2:ReleaseAddress", "ec2:AssociateDhcpOptions", - "ec2:DeleteDhcpOptions", + "ec2:TerminateInstances", + "ec2:DetachNetworkInterface", + "ec2:DescribeTags", + "ec2:DescribeVpcPeeringConnections", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface", + "ec2:DetachInternetGateway", + 
"ec2:DescribeNatGateways", + "cloudwatch:GetMetricStatistics", + "ec2:StopInstances", + "ec2:DisassociateRouteTable", + "ec2:DescribeSecurityGroups", "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeImages", + "ec2:DescribeVpcs", "ec2:DeleteSecurityGroup", - "ec2:DeleteRouteTable", - "ec2:DisassociateRouteTable", - "ec2:ReleaseAddress", - "ec2:DeleteSubnet", + "ec2:DescribeInstanceTypes", + "ec2:DeleteDhcpOptions", + "ec2:DeleteNatGateway", + "ec2:DescribeVpcEndpoints", "ec2:DeleteVpc", - "ec2:DeleteVpcEndpoints", - "ec2:DetachNetworkInterface", - "ec2:DeleteNetworkInterface", - "ec2:ModifyNetworkInterfaceAttribute", - "ec2:DeleteNetworkAcl", - "ec2:createVpc" + "ec2:DescribeSubnets" ], "Resource": "*" } diff --git a/iam/clouds/aws/delete/CloudGovernanceS3Policy b/iam/clouds/aws/delete/CloudGovernanceS3Policy index 77c1afff..b22fb251 100644 --- a/iam/clouds/aws/delete/CloudGovernanceS3Policy +++ b/iam/clouds/aws/delete/CloudGovernanceS3Policy @@ -14,7 +14,8 @@ "s3:DeleteObject", "s3:DeleteBucket", "s3:putBucketTagging", - "s3:GetBucketTagging" + "s3:GetBucketTagging", + "s3:GetBucketLocation" ], "Resource": "*" } diff --git a/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy b/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy index 8b27c651..09d89c17 100644 --- a/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy +++ b/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy @@ -33,6 +33,7 @@ "ec2:DescribeAddresses", "ec2:DescribeInstances", "ec2:DescribeTags", + "ec2:DescribeVpcPeeringConnections", "ec2:DescribeRegions", "ec2:DescribeDhcpOptions", "ec2:DescribeNatGateways", @@ -44,7 +45,10 @@ "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcs", "ec2:DescribeVolumes", + "ec2:DescribeInstanceTypes", + "ec2:createVpc", "ec2:DescribeVpcEndpoints", + "ec2:DeleteVpc", "ec2:DescribeSubnets", "ec2:DescribeNetworkAcls", "ec2:DescribeRouteTables" diff --git a/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json new file mode 100644 index 00000000..a244a577 --- /dev/null +++ b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json @@ -0,0 +1,44 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "cur:*", + "ce:*", + "account:GetAccountInformation", + "aws-portal:ViewBilling", + "billing:GetBillingData", + "billing:GetBillingDetails", + "billing:GetBillingNotifications", + "billing:GetBillingPreferences", + "billing:GetCredits", + "billing:GetContractInformation", + "billing:GetIAMAccessPreference", + "billing:GetSellerOfRecord", + "billing:ListBillingViews", + "consolidatedbilling:ListLinkedAccounts", + "consolidatedbilling:GetAccountBillingRole", + "freetier:GetFreeTierAlertPreference", + "freetier:GetFreeTierUsage", + "invoicing:GetInvoiceEmailDeliveryPreferences", + "invoicing:GetInvoicePDF", + "invoicing:ListInvoiceSummaries", + "payments:GetPaymentInstrument", + "payments:GetPaymentStatus", + "payments:ListPaymentPreferences", + "purchase-orders:GetPurchaseOrder", + "purchase-orders:ViewPurchaseOrders", + "purchase-orders:ListPurchaseOrderInvoices", + "purchase-orders:ListPurchaseOrders", + "tax:GetTaxRegistrationDocument", + "tax:GetTaxInheritance", + "tax:ListTaxRegistrations", + "savingsplans:Describe*", + "savingsplans:List*" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json 
b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json
new file mode 100644
index 00000000..49005e92
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json
@@ -0,0 +1,14 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "AWS": [
+          "arn:aws:iam::452958939641:user/athiruma"
+        ]
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/iam/clouds/aws/payer_roles/README.md b/iam/clouds/aws/payer_roles/README.md
new file mode 100644
index 00000000..2c3f768c
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/README.md
@@ -0,0 +1,57 @@
+## Create IAM Assume Role
+
+## From the AWS Console
+
+### Go to the **IAM** service
+
+#### Create an IAM Policy
+
+1. Click on **Policies**
+2. Click on **Create Policy**
+3. Switch to the JSON tab and paste the contents of *CloudGovernanceCostExplorerReadPolicy.json*.
+4. Click on Next: Tags
+5. Click on Next: Review
+6. Enter the policy name as CloudGovernanceCostExplorerReadPolicy
+7. Click on **Create Policy**. (The policy will be created and listed under Policies.)
+
+#### Create the IAM Role
+
+1. Click on **Roles**
+2. Click on **Create Role**
+3. Select **Custom trust policy** as the trusted entity type.
+4. Paste the contents of the *CloudGovernanceCostExplorerReadRole.json* file. \
+  Note: Replace the username with your **IAM User** name and the AccountId with your **AWS AccountId**.
+5. Select the **CloudGovernanceCostExplorerReadPolicy** from the list of policies.
+6. Enter the role name as **CloudGovernanceCostExplorerReadRole**
+7. Click on **Create role**. (The role will be created and listed under Roles.)
+
+
+## From the Terraform provider
+
+Clone our GitHub repository or copy the **payer_roles** folder.
+If you cloned the repo, the path is iam/clouds/aws/payer_roles/terrafom_create_role/; otherwise it is payer_roles/terrafom_create_role/main.tf.
+
+Go to the *terrafom_create_role* folder and open a terminal.
+
+Configure the AWS CLI credentials
+```commandline
+aws configure
+```
+
+Install [Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) if it is not already installed.
+
+Run the following commands to create the IAM Role and attach the policy to it.
+```commandline
+terraform init
+terraform apply
+```
+
+Note: Before running `terraform apply`, replace the username with your **IAM User** name and the AccountId with your **AWS AccountId** in *CloudGovernanceCostExplorerReadRole.json*.
+
+Then share your **AccountId** and **Role Name** with the users who need access to Cost Explorer.
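With the role in place, a user who has been given the AccountId and role name can verify access. A minimal boto3 sketch (the `<AccountId>` in the ARN is a placeholder to replace, and boto3 is assumed to be installed; this is not part of the repo itself):

```
import boto3

# Assume the shared role; replace <AccountId> with the payer AWS AccountId
sts = boto3.client('sts')
assumed = sts.assume_role(
    RoleArn='arn:aws:iam::<AccountId>:role/CloudGovernanceCostExplorerReadRole',
    RoleSessionName='cloud-governance-ce-read')
creds = assumed['Credentials']

# Query Cost Explorer with the temporary credentials
ce = boto3.client('ce',
                  aws_access_key_id=creds['AccessKeyId'],
                  aws_secret_access_key=creds['SecretAccessKey'],
                  aws_session_token=creds['SessionToken'])
response = ce.get_cost_and_usage(
    TimePeriod={'Start': '2023-01-01', 'End': '2023-02-01'},
    Granularity='MONTHLY',
    Metrics=['UnblendedCost'])
print(response['ResultsByTime'])
```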
+To delete the IAM role and policy through Terraform, run:
+```commandline
+terraform destroy
+```
\ No newline at end of file
diff --git a/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf b/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf
new file mode 100644
index 00000000..b3e7dd03
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf
@@ -0,0 +1,24 @@
+
+provider "aws" {
+
+}
+
+output "role_arn" {
+  value = aws_iam_role.cloud_governance_ce_read_role.arn
+}
+
+output "role_name" {
+  value = aws_iam_role.cloud_governance_ce_read_role.name
+}
+
+resource "aws_iam_role" "cloud_governance_ce_read_role" {
+
+  name = "CloudGovernanceCostExplorerReadRole"
+
+  assume_role_policy = file("./../CloudGovernanceCostExplorerReadRole.json")
+  inline_policy {
+    name   = "CloudGovernanceCostExplorerReadPolicy"
+    policy = file("./../CloudGovernanceCostExplorerReadPolicy.json")
+  }
+
+}
diff --git a/images/CloudResourceOrchestration.jpg b/images/CloudResourceOrchestration.jpg
new file mode 100644
index 00000000..bc9b46cb
Binary files /dev/null and b/images/CloudResourceOrchestration.jpg differ
diff --git a/images/jenkins/add_creds.png b/images/jenkins/add_creds.png
new file mode 100644
index 00000000..b6af48a1
Binary files /dev/null and b/images/jenkins/add_creds.png differ
diff --git a/images/jenkins/jenkins_config_file.png b/images/jenkins/jenkins_config_file.png
new file mode 100644
index 00000000..7414b002
Binary files /dev/null and b/images/jenkins/jenkins_config_file.png differ
diff --git a/images/jenkins/manage_jenkins.png b/images/jenkins/manage_jenkins.png
new file mode 100644
index 00000000..07c00312
Binary files /dev/null and b/images/jenkins/manage_jenkins.png differ
diff --git a/images/jenkins/manage_nodes.png b/images/jenkins/manage_nodes.png
new file mode 100644
index 00000000..a601d85c
Binary files /dev/null and b/images/jenkins/manage_nodes.png differ
diff --git a/images/jenkins/new_node.png b/images/jenkins/new_node.png
new file mode 100644
index 00000000..99b94cfd
Binary files /dev/null and b/images/jenkins/new_node.png differ
diff --git a/images/jenkins/slave_node.png b/images/jenkins/slave_node.png
new file mode 100644
index 00000000..cdf686d7
Binary files /dev/null and b/images/jenkins/slave_node.png differ
diff --git a/jenkins/README.md b/jenkins/README.md
new file mode 100644
index 00000000..b457e540
--- /dev/null
+++ b/jenkins/README.md
@@ -0,0 +1,142 @@
+## Configure a Jenkins slave for the master
+
+### Install Java 11 based on the OS
+rhel8
+```commandline
+sudo yum install java-11-openjdk-devel
+```
+amazon-linux
+```commandline
+yum install java-11-amazon-corretto-devel.x86_64
+```
+
+### Install Docker on CentOS / Fedora
+CentOS: https://docs.docker.com/engine/install/centos/
+Fedora: https://docs.docker.com/engine/install/fedora/
+
+### Installing on Fedora
+```
+sudo dnf -y install dnf-plugins-core
+sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+sudo dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+sudo systemctl start docker
+```
+
+### Add the Jenkins user
+```commandline
+useradd jenkins -U -s /bin/bash
+```
+Set a password
+```commandline
+passwd jenkins
+```
+e.g. test123
+
+### Add jenkins to the sudoers file
+
+$ vi /etc/sudoers
+```commandline
+jenkins ALL=(ALL) NOPASSWD: ALL
+```
+
+### Create a key pair
+```commandline
+ssh-keygen
+cat .ssh/id_rsa.pub >> .ssh/authorized_keys
+```
+
+### Connect your node to the Jenkins master
+
+Open the Jenkins master
+1. Manage Jenkins
+![Manage Jenkins](../images/jenkins/manage_jenkins.png)
+2. Manage Nodes and Clouds
+![ManageNodes](../images/jenkins/manage_nodes.png)
+3. New Node
+![NewNode](../images/jenkins/new_node.png)
+4. Add details
+![Details](../images/jenkins/slave_node.png)
+5. Configure Node
+![Config](../images/jenkins/jenkins_config_file.png)
+
+Click on Add to add the jenkins user and private key
+![PrivateKey](../images/jenkins/add_creds.png)
+
+
+Give the **jenkins** user permission to use the docker daemon
+```commandline
+sudo chown jenkins:jenkins /var/run/docker.sock
+```
+Check that docker works for the jenkins user
+```commandline
+sudo su - jenkins
+docker images
+```
+
+Now you are ready to run the cloud-governance policies.
+
+### Run ElasticSearch, Grafana and Kibana as containers on the same network
+
+#### Using the docker engine
+```commandline
+# detached mode
+docker-compose -f docker_compose_file_path up -d
+# down the containers
+docker-compose -f docker_compose_file_path down
+```
+
+#### Using podman
+
+Create the elasticsearch & grafana local persistence directories and allow permissions
+```commandline
+CLOUD_GOVERNANCE_PATH=""
+mkdir -p $CLOUD_GOVERNANCE_PATH/grafana
+mkdir -p $CLOUD_GOVERNANCE_PATH/elasticsearch
+
+# Give permissions
+chmod 777 -R $CLOUD_GOVERNANCE_PATH/grafana
+chmod 777 $CLOUD_GOVERNANCE_PATH/elasticsearch
+```
+
+```commandline
+# Run the containers in pods
+podman play kube file.yml
+# Delete the containers in pods
+podman play kube --down file.yml
+```
+
+
+## How to create a new user and S3 bucket on AWS
+
+Go to the IAM service to create the policy and user.
+
+1. Create a policy named CloudGovernancePolicy
+   1. Use [CloudGovernanceDeletePolicy.json](..%2Fiam%2Fclouds%2Faws%2FCloudGovernanceDeletePolicy.json) to create the policy
+2. Create cloud-governance-user
+3. Attach the CloudGovernancePolicy to cloud-governance-user.
+4. Create an S3 bucket: cloud-governance-*
+
+
+#### How to pass AWS credentials to a Jenkins job
+
+1. Create a JSON file in the below format and save it in your local env.
+2. Create/Update the Jenkins file credential
+
+
+```commandline
+{
+"account1": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+    "BUCKET" : "bucket_name"
+  },
+"account2": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+    "BUCKET" : "bucket_name"
+  }
+}
+```
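A minimal sketch of how a job script could consume such a secret file; the `CREDS_FILE` environment variable name here is only an illustration, not something the Jenkins jobs in this repo define:

```
import json
import os

# Path to the secret file exposed by the Jenkins credentials binding
with open(os.environ['CREDS_FILE']) as f:
    accounts = json.load(f)

# Each entry carries the account's access keys and results bucket
for name, creds in accounts.items():
    print(name, creds['BUCKET'])
```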
diff --git a/jenkins/Step_by_Step.md b/jenkins/Step_by_Step.md
new file mode 100644
index 00000000..dfdd32c6
--- /dev/null
+++ b/jenkins/Step_by_Step.md
@@ -0,0 +1,89 @@
+# How to create a new user for cloud-governance
+1. Create an IAM policy CloudGovernanceDeletePolicy
+   1. Use [CloudGovernanceDeletePolicy.json](iam/clouds/aws/CloudGovernanceDeletePolicy.json) to create the policy
+2. Create **cloud-governance-user** and attach the above created policy.
+3. Create an S3 bucket to store policy results.
+
+# Adding a Jenkins slave
+1. Install java-11-jdk
+   ```commandline
+   sudo yum install java-11-openjdk-devel
+   ```
+2. Install docker on [Fedora](https://docs.docker.com/engine/install/fedora/)
+   ```commandline
+   sudo dnf -y install dnf-plugins-core
+   sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+   sudo dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-compose
+   sudo systemctl start docker
+   ```
+3. Create the Jenkins user
+   ```commandline
+   useradd jenkins -U -s /bin/bash
+   passwd jenkins
+   ```
+4. Add the jenkins user to the sudoers file
+   ```
+   $ vi /etc/sudoers
+   jenkins ALL=(ALL) NOPASSWD: ALL
+   ```
+5. Give the jenkins user permission to run docker containers
+   ```
+   sudo chown jenkins:jenkins /var/run/docker.sock
+   ```
+
+6. Run the cloud_governance stack [ElasticSearch, Kibana, Grafana]
+   ```commandline
+   # using jenkins/docker-compose.yml
+   # detached mode
+   docker-compose -f jenkins/docker-compose.yml up -d
+   # down the containers
+   docker-compose -f jenkins/docker-compose.yml down
+   ```
+
+# Connect the Jenkins slave to the master
+1. Go to the Jenkins master.
+2. Click on **Manage Jenkins**
+3. Click on **Manage Nodes and Clouds**
+4. Click on New Node
+5. Add details like the node **Name**
+6. Configure the node
+   1. Remote root directory: **/home/jenkins**
+   2. Launch method: Launch agents via ssh
+      1. Host: **hostname**
+      2. Credentials: *select your creds from the drop-down*
+         1. ADD CREDS: select kind as Username with password
+      3. Host Key Verification Strategy: _Non verifying Verification Strategy_
+      4. Click on Advanced:
+         1. Port: 22
+         2. JavaPath: /usr/bin/java
+7. Click on save.
+8. Check the logs to confirm the slave is connected to the master.
+
+## How to add AWS creds to the Jenkins master
+1. Create a JSON file in the below format and save it. [Keep it safe]
+   ```commandline
+   {
+     "account1": {
+       "AWS_ACCESS_KEY_ID": "access_key",
+       "AWS_SECRET_ACCESS_KEY" : "access_secret",
+       "BUCKET" : "bucket_name"
+     },
+     "account2": {
+       "AWS_ACCESS_KEY_ID": "access_key",
+       "AWS_SECRET_ACCESS_KEY" : "access_secret",
+       "BUCKET" : "bucket_name"
+     }
+   }
+   ```
+2. Log in to the Jenkins console.
+3. Click on Manage Jenkins
+4. Select Manage Credentials
+5. Click on **System** and select the domain where your creds will be stored
+   1. Add Credentials
+      1. Select **secret file**
+      2. Give the Id
+      3. Upload the JSON file
+   2. Update Credentials
+      1. Select the secret you want to update.
+      2. If it is a file secret, upload the modified file.
\ No newline at end of file
diff --git a/jenkins/cloud_resource_orchestration/Jenkinsfile b/jenkins/cloud_resource_orchestration/Jenkinsfile
index b50df9cf..38dbd264 100644
--- a/jenkins/cloud_resource_orchestration/Jenkinsfile
+++ b/jenkins/cloud_resource_orchestration/Jenkinsfile
@@ -22,7 +22,10 @@ pipeline {
         JIRA_TOKEN = credentials('JIRA_TOKEN')
         JIRA_QUEUE = credentials('JIRA_QUEUE')
         CLOUD_RESOURCE_ORCHESTRATION_INDEX = credentials('cloud-resource-orchestration-index')
-
+        CRO_REPLACED_USERNAMES = credentials('cloud_governance_cro_replaces_usernames')
+        CRO_PORTAL = credentials('cloud_governance_cro_portal')
+        CRO_COST_OVER_USAGE = credentials('cloud_governance_cro_cost_over_usage')
+        CRO_ES_INDEX = credentials('cloud_governance_cro_es_index')
         contact1 = "ebattat@redhat.com"
         contact2 = "athiruma@redhat.com"
     }
@@ -34,17 +37,17 @@ pipeline {
     }
     stage('Initial Cleanup') {
         steps {
-            sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+            sh '''if [[ "$(podman images -q quay.io/athiru/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/athiru/cloud-governance 2> /dev/null); fi'''
        }
    }
-    stage('Upload ElasticSearch') {
+    stage('Run the CloudResourceOrchestration') {
        steps {
            sh 'python3 jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py'
        }
    }
    stage('Finalize Cleanup') {
        steps {
-            sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+            sh '''if [[ "$(podman images -q quay.io/athiru/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/athiru/cloud-governance 2> /dev/null); fi'''
            deleteDir()
        }
    }
diff --git a/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py b/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
index 654fec68..d33daae8 100644
--- a/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
+++ b/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
@@ -16,25 +16,34 @@
 JIRA_QUEUE = os.environ['JIRA_QUEUE']
 special_user_mails = os.environ['CLOUD_GOVERNANCE_SPECIAL_USER_MAILS']
 CLOUD_RESOURCE_ORCHESTRATION_INDEX = os.environ['CLOUD_RESOURCE_ORCHESTRATION_INDEX']
-
+CRO_REPLACED_USERNAMES = os.environ['CRO_REPLACED_USERNAMES']
+CRO_DEFAULT_ADMINS = ['athiruma', 'ebattat', 'natashba']
+CRO_PORTAL = os.environ['CRO_PORTAL']
+CRO_COST_OVER_USAGE = os.environ['CRO_COST_OVER_USAGE']
+CRO_ES_INDEX = os.environ['CRO_ES_INDEX']
 es_index = CLOUD_RESOURCE_ORCHESTRATION_INDEX
 input_vars_to_container = [{'account': 'perf-dept', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF,
-                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'CLOUD_NAME': 'aws'},
+                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'PUBLIC_CLOUD_NAME': 'AWS'},
                            {'account': 'perf-scale', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE,
-                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'CLOUD_NAME': 'aws'},
+                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'PUBLIC_CLOUD_NAME': 'AWS'},
                            {'account': 'psap', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PSAP,
-                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'CLOUD_NAME': 'aws'}]
+                            'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'PUBLIC_CLOUD_NAME': 'AWS'}]
+
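The common input vars assembled just below pass AWS_MAX_ATTEMPTS=5 and AWS_RETRY_MODE=standard into the container to soften API throttling. A minimal sketch of the equivalent in-code boto3/botocore configuration (not part of this script; just what those environment variables amount to):

```
import boto3
from botocore.config import Config

# Equivalent of AWS_MAX_ATTEMPTS=5 / AWS_RETRY_MODE=standard set via environment
retry_config = Config(retries={'max_attempts': 5, 'mode': 'standard'})
ec2_client = boto3.client('ec2', config=retry_config)  # retries throttled API calls
```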
+os.system('echo Run CloudResourceOrchestration in the active regions')
-print('Run LongRun in pre active region')
-regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ap-south-1']
+common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'CRO_ES_INDEX': CRO_ES_INDEX, 'log_level': 'INFO', 'LDAP_HOST_NAME': LDAP_HOST_NAME,
+                     'JIRA_QUEUE': JIRA_QUEUE, 'JIRA_TOKEN': JIRA_TOKEN, 'JIRA_USERNAME': JIRA_USERNAME, 'JIRA_URL': JIRA_URL,
+                     'CRO_COST_OVER_USAGE': CRO_COST_OVER_USAGE, 'CRO_PORTAL': CRO_PORTAL, 'CRO_DEFAULT_ADMINS': CRO_DEFAULT_ADMINS, 'CRO_REPLACED_USERNAMES': CRO_REPLACED_USERNAMES,
+                     'CE_PAYER_INDEX': 'cloud-governance-clouds-billing-reports', 'RUN_ACTIVE_REGIONS': True, 'AWS_DEFAULT_REGION': 'us-east-1', 'AWS_MAX_ATTEMPTS': 5, 'AWS_RETRY_MODE': 'standard'}
+# Added AWS_MAX_ATTEMPTS and AWS_RETRY_MODE to handle RateLimit exceptions in AWS API calls made through boto3
+# For more information on throttled API calls: https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
+# AWS default variables: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#:~:text=to%20use%20this.-,AWS_MAX_ATTEMPTS,-The%20total%20number
-common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': es_index, 'log_level': 'INFO', 'LDAP_HOST_NAME': LDAP_HOST_NAME,
-                     'JIRA_QUEUE': JIRA_QUEUE, 'JIRA_TOKEN': JIRA_TOKEN, 'JIRA_USERNAME': JIRA_USERNAME, 'JIRA_URL': JIRA_URL, 'MANAGEMENT': True, 'special_user_mails': f"{special_user_mails}"}
 combine_vars = lambda item: f'{item[0]}="{item[1]}"'
 common_envs = list(map(combine_vars, common_input_vars.items()))
 for input_vars in input_vars_to_container:
+    os.system(f"""echo Running on Account {input_vars.get("account").upper()}""")
     envs = list(map(combine_vars, input_vars.items()))
-    for region in regions:
-        os.system(f"""podman run --net="host" --rm --name cloud_resource_orchestration -e MONITOR="long_run" -e AWS_DEFAULT_REGION="{region}" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} quay.io/ebattat/cloud-governance:latest""")
+    os.system(f"""podman run --net="host" --rm --name cloud_resource_orchestration -e CLOUD_RESOURCE_ORCHESTRATION="True" -e EMAIL_ALERT="True" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py b/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
index ab8e365c..d9d7a1bb 100644
--- a/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
+++ b/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
@@ -29,8 +29,7 @@
 es_index_psap = 'cloud-governance-cost-explorer-psap'
 es_index_perf_scale = 'cloud-governance-cost-explorer-perf-scale'
 es_index_global = 'cloud-governance-cost-explorer-global'
-
-cost_tags = ['ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment']
+cost_tags = ['PurchaseType', 'ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment', 'User:Spot']
 # Cost Explorer upload to ElasticSearch
 cost_metric = 'UnblendedCost'  # UnblendedCost/BlendedCost
@@ -45,7 +44,7 @@
 # os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="psap" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e
cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") # os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-scale" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") -es_index_global = 'cloud-governance-cost-explorer-global-cost' +es_index_global = 'cloud-governance-cost-explorer-perf-global-cost' os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-dept" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="psap" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-scale" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile b/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile index 888aa3cf..5ccc1b1e 100644 --- a/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile +++ b/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile @@ -16,6 +16,12 @@ pipeline { AWS_ACCOUNT_ROLE = credentials('cloud-governance-aws-account-role') COST_CENTER_OWNER = credentials('cloud-governance-cost-center-owner') REPLACE_ACCOUNT_NAME = credentials('cloud-governance-replace-account-names') + PAYER_SUPPORT_FEE_CREDIT = credentials('cloud-governance-aws-payer-support-fee-credit') + AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT = credentials('AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT') + AWS_ACCESS_KEY_ID_ATHIRUMA_BOT = credentials('AWS_ACCESS_KEY_ID_ATHIRUMA_BOT') + S3_RESULTS_PATH = credentials('S3_RESULTS_PATH') + ATHENA_DATABASE_NAME = credentials('ATHENA_DATABASE_NAME') + ATHENA_TABLE_NAME = credentials('ATHENA_TABLE_NAME') contact1 = "ebattat@redhat.com" contact2 = "athiruma@redhat.com" @@ -31,7 +37,7 @@ pipeline { sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi''' } } - stage('Upload ElasticSearch') { + stage('Run the AWS Cost Reports') { steps { sh 'python3 
jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py' } diff --git a/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py b/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py index 2696c236..f9660ccc 100644 --- a/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py +++ b/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py @@ -11,17 +11,35 @@ AWS_ACCOUNT_ROLE = os.environ['AWS_ACCOUNT_ROLE'] COST_CENTER_OWNER = os.environ['COST_CENTER_OWNER'] REPLACE_ACCOUNT_NAME = os.environ['REPLACE_ACCOUNT_NAME'] +PAYER_SUPPORT_FEE_CREDIT = os.environ['PAYER_SUPPORT_FEE_CREDIT'] +AWS_ACCESS_KEY_ID_ATHIRUMA_BOT = os.environ['AWS_ACCESS_KEY_ID_ATHIRUMA_BOT'] +AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT = os.environ['AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT'] +S3_RESULTS_PATH = os.environ['S3_RESULTS_PATH'] +ATHENA_DATABASE_NAME = os.environ['ATHENA_DATABASE_NAME'] +ATHENA_TABLE_NAME = os.environ['ATHENA_TABLE_NAME'] -print("Updating the Org level cost billing reports") +os.system('echo "Updating the Org level cost billing reports"') # Cost Explorer upload to ElasticSearch cost_metric = 'UnblendedCost' # UnblendedCost/BlendedCost granularity = 'DAILY' # DAILY/MONTHLY/HOURLY -common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-global-cost-billing-reports', 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS, 'COST_CENTER_OWNER': f"{COST_CENTER_OWNER}", 'REPLACE_ACCOUNT_NAME': REPLACE_ACCOUNT_NAME} +common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-global-cost-billing-reports', 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS, 'COST_CENTER_OWNER': f"{COST_CENTER_OWNER}", 'REPLACE_ACCOUNT_NAME': REPLACE_ACCOUNT_NAME, 'PAYER_SUPPORT_FEE_CREDIT': PAYER_SUPPORT_FEE_CREDIT} combine_vars = lambda item: f'{item[0]}="{item[1]}"' common_input_vars['es_index'] = 'cloud-governance-clouds-billing-reports' common_envs = list(map(combine_vars, common_input_vars.items())) os.system(f"""podman run --rm --name cloud-governance -e policy="cost_explorer_payer_billings" -e AWS_ACCOUNT_ROLE="{AWS_ACCOUNT_ROLE}" -e account="PERF-DEPT" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e SPREADSHEET_ID="{COST_SPREADSHEET_ID}" -e {' -e '.join(common_envs)} -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" quay.io/ebattat/cloud-governance:latest""") + + +os.system('echo "Run the Spot Analysis report over the account using AWS Athena"') +os.system(f"""podman run --rm --name cloud-governance -e policy="spot_savings_analysis" -e account="pnt-payer" \ +-e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_ATHIRUMA_BOT}" \ +-e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT}" \ +-e es_host="{ES_HOST}" -e es_port="{ES_PORT}" \ +-e es_index="cloud-governance-clouds-billing-reports" \ +-e S3_RESULTS_PATH="{S3_RESULTS_PATH}" \ +-e ATHENA_DATABASE_NAME="{ATHENA_DATABASE_NAME}" \ +-e ATHENA_TABLE_NAME="{ATHENA_TABLE_NAME}" \ +quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/clouds/aws/daily/policies/run_policies.py b/jenkins/clouds/aws/daily/policies/run_policies.py index 70685243..7b70f002 100644 --- a/jenkins/clouds/aws/daily/policies/run_policies.py +++ b/jenkins/clouds/aws/daily/policies/run_policies.py @@ -55,35 +55,61 @@ def get_policies(type: str = None): policies.remove('cost_over_usage') policies.remove('monthly_report') 
policies.remove('cost_billing_reports') +policies.remove('cost_explorer_payer_billings') for region in regions: for policy in policies: # Delete zombie cluster resource every night dry_run=no if policy == 'zombie_cluster_resource': - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e 
es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") # running policies dry_run=no per every region, ebs_unattached, ec2_stop, ip_unattached, ec2_idle, nat_gateway_unused, zombie_snapshots - elif policy in ('ec2_idle', 'nat_gateway_unused', 'zombie_snapshots', 'ec2_stop', 'ebs_unattached', 'ip_unattached'): + elif policy in ('zombie_snapshots', 'ebs_unattached'): + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + elif policy in ('ec2_idle', 'ec2_stop'): os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + elif policy in ('nat_gateway_unused', 'ip_unattached'): + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance 
--net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") # running policies dry_run=no only one region, empty_roles, s3_inactive elif policy in ('empty_roles', 's3_inactive') and region == 'us-east-1': - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") # running policies dry_run=yes per every region ebs_in_use, ec2_run else: if policy not in ('empty_roles', 's3_inactive'): - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" 
-e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PSAP" -e MANAGER_EMAIL_ALERT="False" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") # Update AWS IAM User tags from the spreadsheet os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e 
GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
 os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
 os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e log_level="INFO" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" quay.io/ebattat/cloud-governance:latest""")
-# Gitleaks run on github not related to any aws account
+
+# Send policy alerts to users
+accounts = [{'account': 'PERF-DEPT', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF,
+             'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'BUCKET_NAME': BUCKET_PERF},
+            {'account': 'PERF-SCALE', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE,
+             'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'BUCKET_NAME': BUCKET_PERF_SCALE},
+            {'account': 'PSAP', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PSAP,
+             'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'BUCKET_NAME': BUCKET_PSAP}]
+policies.remove('ec2_run')
+policies.remove('ebs_in_use')
+remove_policies = ['ec2_run', 'ebs_in_use', 'zombie_cluster_resource', 'ec2_idle', 'skipped_resources', 'ec2_stop']  # policies that will not be aggregated
+policies = [policy.replace('_', '-') for policy in policies if policy not in remove_policies]
+common_input_vars = {'PUBLIC_CLOUD_NAME': 'AWS', 'BUCKET_KEY': 'logs', 'KERBEROS_USERS': f"{special_user_mails}", 'LDAP_HOST_NAME': f"{LDAP_HOST_NAME}", 'log_level': "INFO", 'MAIL_ALERT_DAYS': "[4, 6, 7]", 'POLICY_ACTIONS_DAYS': "[7]", 'POLICIES_TO_ALERT': policies, 'es_host': ES_HOST, 'es_port': ES_PORT}
+combine_vars = lambda item: f'{item[0]}="{item[1]}"'
+common_envs = list(map(combine_vars, common_input_vars.items()))
+for account in accounts:
+    envs = list(map(combine_vars, account.items()))
+    os.system(f"""podman run --rm --name cloud-governance --net="host" -e policy="send_aggregated_alerts" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} -e DEFAULT_ADMINS="['athiruma']" quay.io/ebattat/cloud-governance:latest""")
+
+# Gitleaks runs on GitHub and is not related to any aws account
 print("run gitleaks")
 region = 'us-east-1'
 policy = 'gitleaks'
diff --git
a/jenkins/clouds/aws/hourly/tagging/tagging.py b/jenkins/clouds/aws/hourly/tagging/tagging.py index 34d15e49..18670824 100644 --- a/jenkins/clouds/aws/hourly/tagging/tagging.py +++ b/jenkins/clouds/aws/hourly/tagging/tagging.py @@ -19,6 +19,6 @@ regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1'] for region in regions: - os.system(f"""podman run --rm --name cloud-governance -e account="perf" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance -e account="psap" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_psap}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") - os.system(f"""podman run --rm --name cloud-governance -e account="perf-scale" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf_scale}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance -e account="perf" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf}" -e log_level="INFO" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance -e account="psap" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_psap}" -e log_level="INFO" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") + os.system(f"""podman run --rm --name cloud-governance -e account="perf-scale" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf_scale}" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile b/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile new file mode 100644 index 00000000..e638348b --- /dev/null +++ b/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile @@ -0,0 +1,58 @@ +pipeline { + agent { + docker { + label 'cloud-governance-worker' + image 
'quay.io/athiru/centos-stream8-podman:latest' + args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged' + } + } + environment { + GCP_DATABASE_NAME = credentials('cloud-governance-gcp-database-name') + GCP_DATABASE_TABLE_NAME = credentials('cloud-governance-gcp-database-table-name') + ES_HOST = credentials('cloud-governance-es-host') + ES_PORT = credentials('cloud-governance-es-port') + COST_SPREADSHEET_ID = credentials('cloud-governance-cost-spreadsheet-id') + GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials') + + contact1 = "ebattat@redhat.com" + contact2 = "athiruma@redhat.com" + } + stages { + stage('Checkout') { // Checkout (git clone ...) the projects repository + steps { + checkout scm + } + } + stage('Initial Cleanup') { + steps { + sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi''' + } + } + stage('Upload ElasticSearch') { + steps { + sh 'python3 jenkins/clouds/gcp/daily/cost_reports/run_reports.py' + } + } + stage('Finalize Cleanup') { + steps { + sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi''' + deleteDir() + } + } + } + post { + always { + deleteDir() + } + failure { + script { + msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})" + emailext body: """\ + Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n + """, + subject: msg, + to: "${contact1}, ${contact2}" + } + } + } +} diff --git a/jenkins/clouds/gcp/daily/cost_reports/run_reports.py b/jenkins/clouds/gcp/daily/cost_reports/run_reports.py new file mode 100644 index 00000000..e89d1647 --- /dev/null +++ b/jenkins/clouds/gcp/daily/cost_reports/run_reports.py @@ -0,0 +1,21 @@ + + +import os + +GCP_DATABASE_NAME = os.environ['GCP_DATABASE_NAME'] +GCP_DATABASE_TABLE_NAME = os.environ['GCP_DATABASE_TABLE_NAME'] +ES_HOST = os.environ['ES_HOST'] +ES_PORT = os.environ['ES_PORT'] +COST_SPREADSHEET_ID = os.environ['COST_SPREADSHEET_ID'] +GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS'] + +print('Running the GCP cost billing reports') + +common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-clouds-billing-reports', + 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS, + 'PUBLIC_CLOUD_NAME': 'GCP', 'SPREADSHEET_ID': COST_SPREADSHEET_ID, + 'GCP_DATABASE_NAME': GCP_DATABASE_NAME, 'GCP_DATABASE_TABLE_NAME': GCP_DATABASE_TABLE_NAME} + +combine_vars = lambda item: f'{item[0]}="{item[1]}"' +common_envs = list(map(combine_vars, common_input_vars.items())) +os.system(f"""podman run --rm --name cloud-governance -e policy="cost_billing_reports" -e {' -e '.join(common_envs)} -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/docker-compose.yml b/jenkins/docker-compose.yml new file mode 100644 index 00000000..de6802e9 --- /dev/null +++ b/jenkins/docker-compose.yml @@ -0,0 +1,40 @@ +version: '2.2' + +services: + grafana: + image: grafana/grafana:8.2.0 + container_name: grafana + ports: + - "3000:3000" + networks: + - monitoring-net + volumes: + - grafana-data:/var/lib/grafana + + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0 + 
container_name: elasticsearch
+    ports:
+      - "9200:9200"
+    environment:
+      - discovery.type=single-node
+      - xpack.security.enabled=false
+    networks:
+      - monitoring-net
+    volumes:
+      - elasticsearch-data:/usr/share/elasticsearch/data
+
+  kibana:
+    image: docker.elastic.co/kibana/kibana:8.8.0
+    container_name: kibana
+    ports:
+      - "5601:5601"
+    environment:
+      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
+    networks:
+      - monitoring-net
+networks:
+  monitoring-net:
+volumes:
+  elasticsearch-data:
+  grafana-data:
diff --git a/jenkins/poc/haim/daily/Jenkinsfile b/jenkins/poc/haim/daily/Jenkinsfile
new file mode 100644
index 00000000..616cb281
--- /dev/null
+++ b/jenkins/poc/haim/daily/Jenkinsfile
@@ -0,0 +1,62 @@
+pipeline {
+    options {
+        disableConcurrentBuilds()
+    }
+    agent {
+        docker {
+            label 'cloud-governance-worker'
+            image 'quay.io/athiru/centos-stream8-podman:latest'
+            args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged'
+        }
+    }
+    environment {
+        AWS_ACCESS_KEY_ID_APPENG = credentials('cloud-governance-aws-access-key-id-appeng')
+        AWS_SECRET_ACCESS_KEY_APPENG = credentials('cloud-governance-aws-secret-access-key-appeng')
+        AWS_IAM_USER_SPREADSHEET_ID = credentials('cloud-governance-aws-iam-user-spreadsheet-id')
+        GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials')
+        LDAP_HOST_NAME = credentials('cloud-governance-ldap-host-name')
+        ES_HOST = credentials('cloud-governance-es-host')
+        ES_PORT = credentials('cloud-governance-es-port')
+        BUCKET_APPENG = credentials('cloud-governance-bucket-appeng')
+        contact1 = "ebattat@redhat.com"
+        contact2 = "athiruma@redhat.com"
+    }
+    stages {
+        stage('Checkout') { // Checkout (git clone ...) the project's repository
+            steps {
+                checkout scm
+            }
+        }
+        stage('Initial Cleanup') {
+            steps {
+                sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+            }
+        }
+        stage('Run Policies for haim poc') {
+            steps {
+                sh 'python3 jenkins/poc/haim/daily/run_policies.py'
+            }
+        }
+        stage('Upload Policies output to ElasticSearch for haim poc') {
+            steps {
+                sh 'python3 jenkins/poc/haim/daily/es_upload.py'
+            }
+        }
+        stage('Finalize Cleanup') {
+            steps {
+                sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+                deleteDir()
+            }
+        }
+    }
+    post {
+        failure {
+            script {
+                msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})"
+                emailext body: """\
+                Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n
+                """, subject: msg, to: "${contact1}, ${contact2}"
+            }
+        }
+    }
+}
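Before the upload script below talks to ElasticSearch, it can help to confirm that the cluster from the compose/pod stack above is reachable. A minimal sketch, assuming ES listens on ES_HOST:ES_PORT with security disabled as in the compose file (the `requests` package is assumed to be installed):

```
import os
import requests

es_host = os.environ.get('ES_HOST', 'localhost')
es_port = os.environ.get('ES_PORT', '9200')
# The cluster health endpoint reports green/yellow/red
response = requests.get(f'http://{es_host}:{es_port}/_cluster/health', timeout=10)
response.raise_for_status()
print(response.json().get('status'))
```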
""" + policies = [] + policies_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), 'cloud_governance', 'policy', 'aws') + for (dirpath, dirnames, filenames) in os.walk(policies_path): + for filename in filenames: + if not filename.startswith('__') and (filename.endswith('.yml') or filename.endswith('.py')): + if not type: + policies.append(os.path.splitext(filename)[0]) + elif type and type in filename: + policies.append(os.path.splitext(filename)[0]) + return policies + + +regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1'] + +os.system('echo "Upload data to ElasticSearch - ec2 index"') + +es_index = 'cloud-governance-appeng-ec2-index' +es_doc_type = '_doc' +for region in regions: + for policy_types in ['ec2', 'zombie', 'ebs', 'empty_roles', 's3', 'ip', 'nat_gateway_unused']: + policies = get_policies(type=policy_types) + for policy in policies: + if policy in ('empty_roles', 's3_inactive'): + if region == 'us-east-1': + os.system(f"""podman run --rm --name cloud-governance-poc-haim -e upload_data_es="upload_data_es" -e account="APPENG" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index}" -e es_doc_type="{es_doc_type}" -e bucket="{BUCKET_APPENG}" -e policy="{policy}" -e AWS_DEFAULT_REGION="{region}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + else: + os.system(f"""podman run --rm --name cloud-governance-poc-haim -e upload_data_es="upload_data_es" -e account="APPENG" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index}" -e es_doc_type="{es_doc_type}" -e bucket="{BUCKET_APPENG}" -e policy="{policy}" -e AWS_DEFAULT_REGION="{region}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/poc/haim/daily/run_policies.py b/jenkins/poc/haim/daily/run_policies.py new file mode 100644 index 00000000..c6aedc4a --- /dev/null +++ b/jenkins/poc/haim/daily/run_policies.py @@ -0,0 +1,53 @@ + +import os + + +AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG'] +AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG'] +LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME'] +LOGS = os.environ.get('LOGS', 'logs') +ES_HOST = os.environ['ES_HOST'] +ES_PORT = os.environ['ES_PORT'] +BUCKET_APPENG = os.environ['BUCKET_APPENG'] + + +def get_policies(type: str = None): + """ + This method return a list of policies name without extension, that can filter by type + @return: list of policies name + """ + policies = [] + policies_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), 'cloud_governance', 'policy', 'aws') + for (dirpath, dirnames, filenames) in os.walk(policies_path): + for filename in filenames: + if not filename.startswith('__') and (filename.endswith('.yml') or filename.endswith('.py')): + if not type: + policies.append(os.path.splitext(filename)[0]) + elif type and type in filename: + policies.append(os.path.splitext(filename)[0]) + return policies + + +regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 
diff --git a/jenkins/poc/haim/daily/run_policies.py b/jenkins/poc/haim/daily/run_policies.py new file mode 100644 index 00000000..c6aedc4a --- /dev/null +++ b/jenkins/poc/haim/daily/run_policies.py @@ -0,0 +1,53 @@ + +import os + + +AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG'] +AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG'] +LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME'] +LOGS = os.environ.get('LOGS', 'logs') +ES_HOST = os.environ['ES_HOST'] +ES_PORT = os.environ['ES_PORT'] +BUCKET_APPENG = os.environ['BUCKET_APPENG'] + + +def get_policies(type: str = None): + """ + This method returns a list of policy names (without file extensions), optionally filtered by type + @return: list of policy names + """ + policies = [] + policies_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), 'cloud_governance', 'policy', 'aws') + for (dirpath, dirnames, filenames) in os.walk(policies_path): + for filename in filenames: + if not filename.startswith('__') and (filename.endswith('.yml') or filename.endswith('.py')): + if not type: + policies.append(os.path.splitext(filename)[0]) + elif type and type in filename: + policies.append(os.path.splitext(filename)[0]) + return policies + + +regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1'] +policies = get_policies() +not_action_policies = ['cost_explorer', 'cost_over_usage', 'monthly_report', 'cost_billing_reports', 'cost_explorer_payer_billings'] +run_policies = list(set(policies) - set(not_action_policies)) +run_policies.sort() + + +os.system(f"""echo "Running the CloudGovernance policies: {run_policies}" """) +for region in regions: + for policy in run_policies: + if policy in ('empty_roles', 's3_inactive') and region != 'us-east-1': + continue # account-global policies run only from us-east-1 + os.system(f"""podman run --rm --name cloud-governance-poc-haim --net="host" -e MANAGER_EMAIL_ALERT="False" -e EMAIL_ALERT="False" -e account="APPENG" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_APPENG}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + + +cost_tags = ['PurchaseType', 'ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment', 'User:Spot'] +cost_metric = 'UnblendedCost' # UnblendedCost/BlendedCost +granularity = 'DAILY' # DAILY/MONTHLY/HOURLY +cost_explorer_index = 'cloud-governance-haim-cost-explorer-global-index' +os.system(f"""echo "Running the CloudGovernance CostExplorer Policies" """) +os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="appeng" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{cost_explorer_index}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
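+# Note: cost_explorer is pinned to us-east-1 because the AWS Cost Explorer API is served from that region.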
diff --git a/jenkins/poc/haim/hourly/Jenkinsfile b/jenkins/poc/haim/hourly/Jenkinsfile new file mode 100644 index 00000000..696a043a --- /dev/null +++ b/jenkins/poc/haim/hourly/Jenkinsfile @@ -0,0 +1,54 @@ +pipeline { + options { + disableConcurrentBuilds() + } + agent { + docker { + label 'cloud-governance-worker' + image 'quay.io/athiru/centos-stream8-podman:latest' + args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged' + } + } + environment { + AWS_ACCESS_KEY_ID_APPENG = credentials('cloud-governance-aws-access-key-id-appeng') + AWS_SECRET_ACCESS_KEY_APPENG = credentials('cloud-governance-aws-secret-access-key-appeng') + AWS_IAM_USER_SPREADSHEET_ID = credentials('cloud-governance-aws-iam-user-spreadsheet-id') + GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials') + LDAP_HOST_NAME = credentials('cloud-governance-ldap-host-name') + contact1 = "ebattat@redhat.com" + contact2 = "athiruma@redhat.com" + } + stages { + stage('Checkout') { // Checkout (git clone ...) the project's repository + steps { + checkout scm + } + } + stage('Initial Cleanup') { + steps { + sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi''' + } + } + stage('Run Tagging Cluster & Non-Cluster') { + steps { + sh 'python3 jenkins/poc/haim/hourly/run_policies.py' + } + } + stage('Finalize Cleanup') { + steps { + sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi''' + deleteDir() + } + } + } + post { + failure { + script { + msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})" + emailext body: """\ + Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n + """,subject: msg, to: "${contact1}, ${contact2}" + } + } + } +} diff --git a/jenkins/poc/haim/hourly/run_policies.py b/jenkins/poc/haim/hourly/run_policies.py new file mode 100644 index 00000000..173483ff --- /dev/null +++ b/jenkins/poc/haim/hourly/run_policies.py @@ -0,0 +1,23 @@ + +import os + + +AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG'] +AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG'] +LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME'] +GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS'] +SPREADSHEET_ID = os.environ['AWS_IAM_USER_SPREADSHEET_ID'] + + +LOGS = os.environ.get('LOGS', 'logs') + +mandatory_tags_appeng = {'Budget': 'APPENG'} + +os.system(f"""echo "Running the tag_iam_user" """) +os.system(f"""podman run --rm --name cloud-governance-poc-haim --net="host" -e account="APPENG" -e EMAIL_ALERT="False" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""") + + +os.system(f"""echo "Running the tag_resources" """) +regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1'] +for region in regions: + os.system(f"""podman run --rm --name cloud-governance-poc-haim -e account="APPENG" -e EMAIL_ALERT="False" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_appeng}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""") diff --git a/jenkins/podman_pod.yml b/jenkins/podman_pod.yml new file mode 100644 index 00000000..8a050426 --- /dev/null +++ b/jenkins/podman_pod.yml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: cloud-governance-pod +spec: + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0 + ports: + - containerPort: 9200 + hostPort: 9200 + env: + - name: discovery.type + value: "single-node" + - name: xpack.security.enabled + value: "false" + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: elasticsearch-data + -
name: kibana + image: docker.elastic.co/kibana/kibana:8.8.0 + ports: + - containerPort: 5601 + hostPort: 5601 + env: + - name: ELASTICSEARCH_HOSTS + value: http://localhost:9200 + - name: grafana + image: docker.io/grafana/grafana:8.2.0 + ports: + - containerPort: 3000 + hostPort: 3000 + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-data + volumes: + - name: elasticsearch-data + hostPath: + path: $CLOUD_GOVERNANCE_PATH/elasticsearch + type: DirectoryOrCreate + - name: grafana-data + hostPath: + path: $CLOUD_GOVERNANCE_PATH/grafana + type: DirectoryOrCreate + +# Replace $CLOUD_GOVERNANCE_PATH with an absolute directory path on the host before playing this pod
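+# Example (illustrative path): sed "s|\$CLOUD_GOVERNANCE_PATH|/opt/cloud-governance|g" jenkins/podman_pod.yml > pod.yml && podman play kube pod.yml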
diff --git a/requirements.txt b/requirements.txt index 79174a42..9a6c7e74 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,29 +1,30 @@ +aiohttp==3.8.1 attrs==21.4.0 azure-identity==1.12.0 azure-mgmt-billing==6.0.0 azure-mgmt-costmanagement==3.0.0 azure-mgmt-subscription==3.1.1 -botocore==1.29.1 -boto3==1.26.1 +boto3==1.26.4 +botocore==1.29.4 elasticsearch==7.11.0 elasticsearch-dsl==7.4.0 google-api-python-client==2.57.0 google-auth-httplib2==0.1.0 google-auth-oauthlib==0.5.2 +google-cloud-bigquery==3.5.0 +google-cloud-billing==1.9.1 ibm_platform_services==0.27.0 myst-parser==0.17.0 +oauthlib~=3.1.1 pandas +PyAthena[Pandas]==3.0.5 PyGitHub==1.55 -requests==2.27.1 +python-ldap==3.4.2 +requests==2.31.0 retry==0.9.2 SoftLayer==6.0.0 sphinx==4.5.0 sphinx-rtd-theme==1.0.0 -python-ldap==3.4.2 typing==3.7.4.3 typeguard==2.13.3 - -# EC2 LongRun Required by Jira -aiohttp==3.8.1 urllib3==1.26.7 -oauthlib~=3.1.1 \ No newline at end of file diff --git a/setup.py b/setup.py index 4b6e6bd8..c3f5a1b3 100644 --- a/setup.py +++ b/setup.py @@ -2,13 +2,10 @@ from os import path from setuptools import setup, find_packages - -__version__ = '1.1.74' - +__version__ = '1.1.146' here = path.abspath(path.dirname(__file__)) - if path.isfile(path.join(here, 'README.md')): with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() @@ -41,33 +38,37 @@ packages=find_packages(include=['cloud_governance', 'cloud_governance.*']), install_requires=[ + 'aiohttp==3.8.1', # required by jira 'attrs==21.4.0', # readthedocs 'azure-identity==1.12.0', # azure identity + 'azure-mgmt-billing==6.0.0', # azure billing management 'azure-mgmt-costmanagement==3.0.0', # azure cost management 'azure-mgmt-subscription==3.1.1', # azure subscriptions - 'azure-mgmt-billing==6.0.0', # azure billing management - 'botocore==1.29.1', # required by c7n 0.9.14 - 'boto3==1.26.1', # required by c7n 0.9.14 - 'elasticsearch==7.11.0', # depends on elasticsearch server + 'boto3==1.26.4', # required by c7n 0.9.14 + 'botocore==1.29.4', # required by c7n 0.9.14 'elasticsearch-dsl==7.4.0', + 'elasticsearch==7.11.0', # depends on elasticsearch server 'google-api-python-client==2.57.0', # google drive 'google-auth-httplib2==0.1.0', # google drive 'google-auth-oauthlib==0.5.2', # google drive + 'google-cloud-bigquery==3.5.0', # google cloud cost + 'google-cloud-billing==1.9.1', # google cloud cost 'ibm_platform_services==0.27.0', # IBM Usage reports 'myst-parser==0.17.0', # readthedocs + 'oauthlib~=3.1.1', # required by jira 'pandas', # latest: aggregate ec2/ebs cluster data + 'PyAthena[Pandas]==3.0.5', # AWS Athena package 'PyGitHub==1.55', # gitleaks 'python-ldap==3.4.2', # prerequisite: sudo dnf install -y python39-devel openldap-devel gcc - 'requests==2.27.1', # rest api & lambda + 'requests==2.31.0', # rest api & lambda 'retry==0.9.2', 'SoftLayer==6.0.0', # IBM SoftLayer - 'sphinx==4.5.0', # readthedocs 'sphinx-rtd-theme==1.0.0', # readthedocs - 'typing==3.7.4.3', + 'sphinx==4.5.0', # readthedocs 'typeguard==2.13.3', # checking types - 'aiohttp==3.8.1', # required by jira + 'typing==3.7.4.3', 'urllib3==1.26.7', # required by jira - 'oauthlib~=3.1.1', # required by jira + ], setup_requires=['pytest', 'pytest-runner', 'wheel', 'coverage'], diff --git a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py index 81b253d2..9a6eb190 100644 --- a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py +++ b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py @@ -1,3 +1,6 @@ +import datetime + +import pytest from cloud_governance.common.clouds.azure.cost_management.cost_management_operations import CostManagementOperations @@ -8,7 +11,13 @@ def test_get_usage(): @return: """ cost_management_operations = CostManagementOperations() - cost_usage_data = cost_management_operations.get_usage(scope=cost_management_operations.azure_operations.scope) + end_date = datetime.datetime.utcnow() - datetime.timedelta(days=2) + start_date = end_date - datetime.timedelta(days=1) + granularity = 'Daily' + cost_usage_data = cost_management_operations.get_usage(scope=cost_management_operations.azure_operations.scope, + start_date=start_date, end_date=end_date, + granularity=granularity + ) assert cost_usage_data @@ -18,5 +27,10 @@ def test_get_forecast(): @return: """ cost_management_operations = CostManagementOperations() - cost_forecast_data = cost_management_operations.get_forecast(scope=cost_management_operations.azure_operations.scope) + end_date = datetime.datetime.utcnow() + datetime.timedelta(days=1) + start_date = end_date - datetime.timedelta(days=1) + granularity = 'Daily' + cost_forecast_data = cost_management_operations.get_forecast(scope=cost_management_operations.azure_operations.scope, + start_date=start_date, end_date=end_date, + granularity=granularity) assert cost_forecast_data diff --git a/tests/integration/cloud_governance/common/clouds/gcp/__init__.py b/tests/integration/cloud_governance/common/clouds/gcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py b/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py new file mode 100644 index 00000000..f0d504bf --- /dev/null +++ b/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py @@ -0,0 +1,27 @@ +from datetime import datetime, timedelta + +from cloud_governance.common.clouds.gcp.google_account import GoogleAccount +from cloud_governance.main.environment_variables import environment_variables + + +def test_query_list(): + """ + This method tests fetching BigQuery billing data + :return: + """ + environment_variables_dict = environment_variables.environment_variables_dict + database_name = environment_variables_dict.get('GCP_DATABASE_NAME') + database_table_name = environment_variables_dict.get('GCP_DATABASE_TABLE_NAME') + current_date = datetime.now() - timedelta(days=1) + month = str(current_date.month) + if len(month) != 2: # zero-pad single-digit months + month = f'0{month}' + year = current_date.year + year_month = f'{year}{month}' + fetch_query = f"""SELECT invoice.month + FROM `{database_name}.{database_table_name}` + where invoice.month = '{year_month}' group
by invoice.month""" + gcp_account = GoogleAccount() + result_year_month = gcp_account.query_list([fetch_query])[0][0].get('month') + assert result_year_month == year_month + diff --git a/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py b/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py index d8c3a445..1e184392 100644 --- a/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py +++ b/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py @@ -48,7 +48,7 @@ def test_delete_data_between_range(): es.delete_data_in_between_in_es(es_index=es_index, start_datetime=start_time, end_datetime=end_time) start_time = end_time.replace(hour=0, minute=0, second=0) end_time = datetime.datetime.now() - assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1 + assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1 es.delete_data_in_es(es_index=es_index) @@ -63,10 +63,10 @@ def test_fetch_data_between_range(): time.sleep(3) end_time = datetime.datetime.now() start_time = (end_time - datetime.timedelta(1)).replace(hour=0, minute=0, second=0) - assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1 + assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1 es.delete_data_in_es(es_index=es_index) start_time = end_time - datetime.timedelta(1) - assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0 + assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0 es.delete_data_in_es(es_index=es_index) @@ -78,4 +78,4 @@ def test_delete_data_in_elastic_search(): es.delete_data_in_es(es_index=es_index) end_time = datetime.datetime.now().replace(hour=0, minute=0, second=0) start_time = (end_time - datetime.timedelta(1)).replace(hour=0, minute=0, second=0) - assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0 + assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0 diff --git a/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py b/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py index d868553a..8f245316 100644 --- a/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py +++ b/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py @@ -203,6 +203,7 @@ def test_delete_ec2_elastic_load_balancer_v2(): assert not EC2Operations(region_name).find_load_balancer_v2(elb_name='test-load-balancer-v2') +@pytest.mark.skip(reason="Handled by ebs_unattached") @mock_ec2 def test_delete_ebs_volume(): """ diff --git a/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py b/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py similarity index 92% rename from tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py rename to tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py index 4e55df33..7ade0bd8 100644 --- 
a/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py +++ b/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py @@ -15,13 +15,13 @@ def test_nat_gateway_unused(): This method tests the deletion of unused NAT Gateways @return: """ - os.environ['policy'] = 'nat_gateway_unused' + os.environ['policy'] = 'unused_nat_gateway' ec2_client = boto3.client('ec2', region_name=os.environ.get('AWS_DEFAULT_REGION')) subnet_id = ec2_client.describe_subnets()['Subnets'][0].get('SubnetId') ec2_client.create_nat_gateway(SubnetId=subnet_id) nat_gateway_unused = NonClusterZombiePolicy() nat_gateway_unused.set_dryrun(value='no') - nat_gateway_unused.set_policy(value='nat_gateway_unused') + nat_gateway_unused.set_policy(value='unused_nat_gateway') nat_gateway_unused.DAYS_TO_TRIGGER_RESOURCE_MAIL = -1 nat_gateway_unused._check_resource_and_delete(resource_name='Nat Gateway', resource_id='NatGatewayId', @@ -39,7 +39,7 @@ def test_nat_gateway_unused_not_delete(): This method tests the deletion of unused NAT Gateways @return: """ - os.environ['policy'] = 'nat_gateway_unused' + os.environ['policy'] = 'unused_nat_gateway' tags = [ {'Key': 'Name', 'Value': 'CloudGovernanceTestZombieNatGateway'}, {'Key': 'Owner', 'Value': 'CloudGovernance'}, @@ -50,7 +50,7 @@ ec2_client.create_nat_gateway(SubnetId=subnet_id, TagSpecifications=[{'ResourceType': 'nat-gateway', 'Tags': tags}]) nat_gateway_unused = NonClusterZombiePolicy() nat_gateway_unused.set_dryrun(value='no') - nat_gateway_unused.set_policy(value='nat_gateway_unused') + nat_gateway_unused.set_policy(value='unused_nat_gateway') nat_gateway_unused.DAYS_TO_TRIGGER_RESOURCE_MAIL = -1 nat_gateway_unused._check_resource_and_delete(resource_name='Nat Gateway', resource_id='NatGatewayId', diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/__init__.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/__init__.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py new file mode 100644 index 00000000..11032229 --- /dev/null +++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py @@ -0,0 +1,103 @@ +from datetime import datetime, timedelta +from functools import wraps +from unittest.mock import patch + + +from cloud_governance.common.jira.jira_operations import JiraOperations + + +def get_ticket_response(): + """ + This method returns the mock ticket data + :return: + """ + created = datetime.strftime(datetime.utcnow() - timedelta(days=2), "%Y-%m-%dT%H:%M:%S") + response = { + 'key': 'MOCK-1', + 'fields': { + 'status': {'name': 'Refinement'}, + 'created': created, + 'description': "First Name: Test\n" + "Last Name: Mock\nEmail Address: mock@gmail.com\n" + "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n" + "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n" + "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n" + "Cost Estimation:12.0\nDetails: This is the test machine \n" + "ApprovedManager: mockapproval@gmail.com \n" + "Region: ap-south-1 \n" + } + } + + return response
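+# The description above imitates the field layout of a real CRO request ticket; all names and emails are dummy test values.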
+ + +def mock_get_issue(*args, **kwargs): + """ + This method mocks fetching a single ticket's data + :param kwargs: + :return: + """ + if kwargs.get('ticket_id'): + return get_ticket_response() + + +def mock_move_issue_state(*args, **kwargs): + """ + This method mocks moving a Jira ticket to another state + :param kwargs: + :return: + """ + if kwargs.get('ticket_id') and kwargs.get('state'): + return True + return False + + +async def mock_get_all_issues(*args, **kwargs): + """ + This method mocks searching for all tickets + :param args: + :param kwargs: + :return: + """ + if kwargs.get('query'): + response = { 'issues': { 'key': 'MOCK-1', 'fields': { 'status': {'name': 'Refinement'}, 'created': datetime.utcnow() - timedelta(days=2), 'description': "First Name: Test\n" + "Last Name: Mock\nEmail Address: mock@gmail.com\n" + "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n" + "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n" + "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n" + "Cost Estimation:12.0\nDetails: This is the test machine \n" + "ApprovedManager: mockapproval@gmail.com \n" + "Region: ap-south-1 \n" + } + } + } + return response + + +def mock_jira(method): + """ + This decorator mocks the JiraOperations methods used by these tests + @param method: + @return: + """ + + @wraps(method) + def method_wrapper(*args, **kwargs): + """ + This wrapper method wraps the decorated method with the JiraOperations patches + @param args: + @param kwargs: + @return: + """ + with patch.object(JiraOperations, 'get_issue', mock_get_issue),\ + patch.object(JiraOperations, 'move_issue_state', mock_move_issue_state), \ + patch.object(JiraOperations, 'get_all_issues', mock_get_all_issues): + result = method(*args, **kwargs) + return result + + return method_wrapper diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py new file mode 100644 index 00000000..83622979 --- /dev/null +++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py @@ -0,0 +1,33 @@ +import boto3 +from moto import mock_ec2, mock_iam, mock_cloudtrail + +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.monitor_cro_instances import MonitorCROInstances +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.tag_cro_instances import TagCROInstances +from cloud_governance.main.environment_variables import environment_variables +from tests.unittest.cloud_governance.cloud_resource_orchestration.mocks.mock_jira import mock_jira + +AWS_DEFAULT_REGION = 'ap-south-1' + + +@mock_iam +@mock_cloudtrail +@mock_jira
@mock_ec2 +def test_monitor_cro_instances(): + """ + This method verifies that the CRO monitoring data is returned + :return: + """ + environment_variables_dict = environment_variables.environment_variables_dict + environment_variables_dict['JIRA_TOKEN'] = '123456mock' + tags = [{'Key': 'TicketId', 'Value': '1'}] + ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) + default_ami_id = 'ami-03cf127a' + ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, + TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}]) + tag_cro_instances = TagCROInstances(region_name=AWS_DEFAULT_REGION) + tag_cro_instances.run() + monitor_data = MonitorCROInstances(region_name=AWS_DEFAULT_REGION) + actual_result =
monitor_data.run() + expected_result = 1 + assert len(actual_result) == expected_result diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py new file mode 100644 index 00000000..c081e0b5 --- /dev/null +++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py @@ -0,0 +1,30 @@ +import boto3 +from moto import mock_ec2, mock_cloudtrail, mock_iam + +from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.tag_cro_instances import TagCROInstances +from cloud_governance.main.environment_variables import environment_variables +from tests.unittest.cloud_governance.cloud_resource_orchestration.mocks.mock_jira import mock_jira + +AWS_DEFAULT_REGION = 'ap-south-1' + + +@mock_iam +@mock_cloudtrail +@mock_jira +@mock_ec2 +def test_tag_cro_instances(): + """ + This method tests the tagging of CRO instances + :return: + """ + environment_variables_dict = environment_variables.environment_variables_dict + environment_variables_dict['JIRA_TOKEN'] = '123456mock' + tags = [{'Key': 'TicketId', 'Value': '1'}] + ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) + default_ami_id = 'ami-03cf127a' + ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, + TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}]) + tag_cro_instances = TagCROInstances(region_name=AWS_DEFAULT_REGION) + actual_response = tag_cro_instances.run() + expected_response = 1 + assert len(actual_response) == expected_response diff --git a/tests/unittest/cloud_governance/common/clouds/aws/athena/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/athena/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py b/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py new file mode 100644 index 00000000..7fde7264 --- /dev/null +++ b/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py @@ -0,0 +1,14 @@ +from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations +from tests.unittest.cloud_governance.common.clouds.aws.mocks.aws_mock import mock_athena + + +@mock_athena +def test_execute_query(): + """ + This method tests execute_query against the mocked Athena client + :return: + """ + athena_operations = PyAthenaOperations() + actual_result = athena_operations.execute_query(query_string="select * from mock_table") + expected_result = [{'A': 1, 'B': 0}, {'A': 2, 'B': 1}, {'A': 3, 'B': 2}] + assert actual_result == expected_result
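+# The expected records correspond to the fixed DataFrame returned by aws_mock.mock_execute_query.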
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/ec2/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/ec2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py b/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py new file mode 100644 index 00000000..dd16a661 --- /dev/null +++ b/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py @@ -0,0 +1,52 @@ +import boto3 +from moto import mock_ec2 + +from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations + +AWS_DEFAULT_REGION = 'ap-south-1' + + +@mock_ec2 +def test_get_ec2_instance_list(): + """ + This method tests fetching the list of EC2 instances + :return: + """ + ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) + ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION) + default_ami_id = 'ami-03cf127a' + tags = [{'Key': 'User', 'Value': 'cloud-governance'}] + ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, + TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}]) + assert type(ec2_operations.get_ec2_instance_list()[0]) == dict + + +@mock_ec2 +def test_get_ec2_instance_ids(): + """ + This method tests returning the list of instance ids + :return: + """ + ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) + ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION) + tags = [{'Key': 'User', 'Value': 'cloud-governance'}] + default_ami_id = 'ami-03cf127a' + ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, + TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}]) + assert type(ec2_operations.get_ec2_instance_ids()[0]) == str + + +@mock_ec2 +def test_tag_ec2_resources(): + """ + This method tests tagging instances batch-wise + :return: + """ + ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) + ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION) + tags = [{'Key': 'User', 'Value': 'cloud-governance'}] + default_ami_id = 'ami-03cf127a' + for i in range(25): + ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1) + resource_ids = ec2_operations.get_ec2_instance_ids() + assert ec2_operations.tag_ec2_resources(client_method=ec2_client.create_tags, resource_ids=resource_ids, tags=tags) == 2 # 25 instances are expected to be tagged in two batches diff --git a/tests/unittest/cloud_governance/common/clouds/aws/mocks/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/mocks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py b/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py new file mode 100644 index 00000000..32f55ee1 --- /dev/null +++ b/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py @@ -0,0 +1,60 @@ +from functools import wraps +from unittest.mock import patch + +import pandas + +from cloud_governance.common.clouds.aws.athena.abstract_athena_operations import AbstractAthenaOperations +from cloud_governance.common.clouds.aws.athena.boto3_client_athena_operations import BotoClientAthenaOperations +from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations + + +class ParameterNotFoundException(Exception): + def __init__(self, parameter_name): + self.parameter_name = parameter_name + super().__init__(f"Parameter '{parameter_name}' not found.") + + def __str__(self): + return f"ParameterNotFoundException: Parameter '{self.parameter_name}' not found."
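+# The helpers below patch execute_query on both PyAthenaOperations and BotoClientAthenaOperations, so Athena-backed tests run without AWS access.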
+ + +def mock_execute_query(cls, *args, **kwargs): + """ + This method mocks execute_query, returning a fixed result set + :param cls: + :param args: + :param kwargs: + :return: + """ + if kwargs.get('query_string'): + data = { + "A": [1, 2, 3], + "B": [0, 1, 2] + } + df1 = pandas.DataFrame(data) + return df1.to_dict(orient='records') + else: + raise ParameterNotFoundException('query_string') + + +def mock_athena(method): + """ + Mock the AWS Athena operations + :param method: + :return: + """ + @wraps(method) + def method_wrapper(*args, **kwargs): + """ + This wrapper applies the Athena patches around the wrapped method + :param args: + :param kwargs: + :return: + """ + with patch.object(PyAthenaOperations, 'execute_query', mock_execute_query), \ + patch.object(BotoClientAthenaOperations, 'execute_query', mock_execute_query): + result = method(*args, **kwargs) + return result + return method_wrapper diff --git a/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py b/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py index 81ba090a..cb61c3b7 100644 --- a/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py +++ b/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py @@ -12,4 +12,4 @@ def mock_search_s(cls, base, scope, filterstr=None, attrlist=None): @patch.object(SimpleLDAPObject, 'search_s', mock_search_s) def test_get_details(): ldap_object = LdapSearch(ldap_host_name='example.com') - assert list(ldap_object.get_details(user_name='test').keys()) == ['displayName', 'manager', 'cn'] + assert list(ldap_object._LdapSearch__get_details(user_name='test').keys()) == ['displayName', 'manager', 'cn'] diff --git a/tests/unittest/cloud_governance/common/utils/__init__.py b/tests/unittest/cloud_governance/common/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittest/cloud_governance/common/utils/test_utils.py b/tests/unittest/cloud_governance/common/utils/test_utils.py new file mode 100644 index 00000000..769e6357 --- /dev/null +++ b/tests/unittest/cloud_governance/common/utils/test_utils.py @@ -0,0 +1,21 @@ +import boto3 +from moto import mock_ec2 + +from cloud_governance.common.clouds.aws.utils.utils import Utils + + +@mock_ec2 +def test_tag_aws_resources(): + """ + This method tests tagging AWS resources in batches + :return: + """ + region_name = 'ap-south-1' + ec2_client = boto3.client('ec2', region_name=region_name) + common_utils = Utils(region=region_name) + resource_ids = [] + for num in range(30): + instance_id = ec2_client.run_instances(MinCount=1, MaxCount=1)['Instances'][0]['InstanceId'] + resource_ids.append(instance_id) + actual_res = common_utils.tag_aws_resources(ec2_client.create_tags, tags=[{'Key': 'User', 'Value': 'test'}], resource_ids=resource_ids) + assert actual_res == 2 diff --git a/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py b/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py deleted file mode 100644 index 2c6cac8a..00000000 --- a/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py +++ /dev/null @@ -1,31 +0,0 @@ -import boto3 -from moto import mock_ec2 - -from cloud_governance.cloud_resource_orchestration.aws.long_run.monitor_long_run import MonitorLongRun -from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun -from tests.unittest.cloud_resource_orchestration.mocks.mock_jira_operations import jira_mock - -AWS_DEFAULT_REGION = 'ap-south-1' - - -@jira_mock -@mock_ec2 -def test_monitor_long_run(): - """ - This method tests monitoring
of long run - """ - default_ami_id = 'ami-03cf127a' - ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) - tags = [{'Key': 'JiraId', 'Value': 'test'}] - instance_id = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, - TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])['Instances'][0]['InstanceId'] - tag_long_run = TagLongRun(region_name=AWS_DEFAULT_REGION) - response = tag_long_run.run() - result = False - if response: - monitor_long_run = MonitorLongRun(region_name=AWS_DEFAULT_REGION) - response = monitor_long_run.monitor_instances() - if response: - value = list(response.values())[0] - result = value == instance_id - assert result diff --git a/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py b/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py deleted file mode 100644 index 83be17a2..00000000 --- a/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py +++ /dev/null @@ -1,27 +0,0 @@ - -import boto3 -from moto import mock_ec2 - -from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun -from tests.unittest.cloud_resource_orchestration.mocks.mock_jira_operations import jira_mock - -AWS_DEFAULT_REGION = 'ap-south-1' - - -@jira_mock -@mock_ec2 -def test_tag_long_run(): - """ - This method tests the tag long run - """ - default_ami_id = 'ami-03cf127a' - ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) - tags = [{'Key': 'JiraId', 'Value': 'test'}] - instance_id = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, - TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])['Instances'][0]['InstanceId'] - tag_long_run = TagLongRun(region_name=AWS_DEFAULT_REGION) - response = tag_long_run.run() - if response: - assert response['test'][0] == instance_id - else: - assert False diff --git a/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py b/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py deleted file mode 100644 index ae43dc4d..00000000 --- a/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py +++ /dev/null @@ -1,24 +0,0 @@ -import time - -import boto3 -from moto import mock_ec2 - -from cloud_governance.cloud_resource_orchestration.common.ec2_monitor_operations import EC2MonitorOperations - -AWS_DEFAULT_REGION = 'ap-south-1' - - -@mock_ec2 -def test_get_instance_run_hours(): - """" - This method tests current instance running hours - """ - default_ami_id = 'ami-03cf127a' - ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) - ec2_monitor_operations = EC2MonitorOperations(region_name=AWS_DEFAULT_REGION) - tags = [{'Key': 'JiraId', 'Value': 'test'}] - ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, - TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}]) - time.sleep(5) - hours, _ = ec2_monitor_operations.get_instance_run_hours(instance=ec2_client.describe_instances()['Reservations'][0]['Instances'][0], jira_id='test') - assert hours > 0 diff --git a/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py b/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py deleted file mode 100644 index 979c74c7..00000000 --- a/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py +++ /dev/null @@ -1,61 +0,0 @@ - -from functools import wraps -from unittest.mock 
import patch - - -from cloud_governance.common.jira.jira_operations import JiraOperations - - -def mock_get_issue(*args, **kwargs): - """This method mock the get_issue from the jira""" - if kwargs.get('jira_id'): - return {'fields': { - 'status': {'name': 'Refinement'}, - 'description': "First Name: Test\n" - "Last Name: Mock\nEmail Address: mock@gmail.com\n" - "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n" - "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n" - "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n" - "Cost Estimation:12.0\nDetails: This is the test machine \n" - "ApprovedManager: mockapproval@gmail.com \n" - }} - - -def mock_get_jira_id_sub_tasks(*args, **kwargs): - """This method mock get_jira_id_sub_tasks""" - if kwargs.get('jira_id'): - return ['subtask-1'] - return {} - - -def mock_move_issue_state(*args, **kwargs): - """this method mock mock_move_issue_state""" - if kwargs.get('jira_id') and kwargs.get('state'): - return True - return False - - -def jira_mock(method): - """ - Mocking the ibm SoftLayer client methods - @param method: - @return: - """ - @wraps(method) - def method_wrapper(*args, **kwargs): - """ - This is the wrapper method to wraps the method inside the function - @param args: - @param kwargs: - @return: - """ - result = '' - try: - with patch.object(JiraOperations, 'get_issue', mock_get_issue), \ - patch.object(JiraOperations, 'get_jira_id_sub_tasks', mock_get_jira_id_sub_tasks),\ - patch.object(JiraOperations, 'move_issue_state', mock_move_issue_state): - result = method(*args, **kwargs) - except Exception as err: - pass - return result - return method_wrapper diff --git a/tests_requirements.txt b/tests_requirements.txt index 2e348f52..61379964 100644 --- a/tests_requirements.txt +++ b/tests_requirements.txt @@ -1,19 +1,18 @@ -boto3==1.26.1 +aiohttp==3.8.1 +azure-identity==1.12.0 +azure-mgmt-costmanagement==3.0.0 +azure-mgmt-billing==6.0.0 +azure-mgmt-subscription==3.1.1 +boto3==1.26.4 elasticsearch==7.11.0 elasticsearch-dsl==7.4.0 moto==2.3.2 +oauthlib~=3.1.1 pandas -requests==2.27.1 -typeguard==2.13.3 +pytest python-ldap==3.4.2 -SoftLayer==6.0.0 +requests==2.31.0 retry==0.9.2 -azure-identity==1.12.0 -azure-mgmt-costmanagement==3.0.0 -azure-mgmt-subscription==3.1.1 -azure-mgmt-billing==6.0.0 - -# EC2 LongRun Required by Jira -aiohttp==3.8.1 +typeguard==2.13.3 +SoftLayer==6.0.0 urllib3==1.26.7 -oauthlib~=3.1.1 \ No newline at end of file