+ Your {cloud_name} cost usage in the last {self.__cro_duration_days} days is $ {user_cost} and {message}
+ You must open the project ticket in the following
Link.
+ After submitting a ticket, you must add Tag (TicketId:#) to every active resource that is related to the project ticket.
+
+ If you have any questions, please let us know in slack channel #perf-dept-public-clouds
+
+ {self.FOOTER}
+"""
+ return subject, body
+
+ def cro_request_for_manager_approval(self, manager: str, request_user: str, cloud_name: str, ticket_id: str, description: dict, **kwargs):
+ """
+ This method returns the message for manager, regarding user approval
+ :param description:
+ :param ticket_id:
+ :param manager:
+ :param request_user:
+ :param cloud_name:
+ :return:
+ """
+ subject = '[Action required]: Cloud Resources Budget Request Approval'
+ manager_full_name = self.get_user_ldap_details(user_name=manager)
+ user_full_name = self.get_user_ldap_details(user_name=request_user)
+ ticket_id = ticket_id.split('-')[-1]
+ context = {'manager': manager, 'manager_full_name': manager_full_name, 'user_full_name': user_full_name,
+ 'ticket_id': ticket_id, 'portal': self.__portal, 'request_user': request_user, 'description': description,
+ 'footer': self.FOOTER}
+ template_loader = self.env_loader.get_template('cro_request_for_manager_approval.j2')
+ context['extra_message'] = kwargs.get('extra_message', '')
+ body = template_loader.render(context)
+ return subject, body
+
+ def cro_send_user_alert_to_add_tags(self, user: str, ticket_ids: list):
+ """
+ This method returns the subject and body for adding tags
+ :param user:
+ :param ticket_ids:
+ :return:
+ """
+ subject = '[Action required]: Add TicketId tag'
+ ticket_ids = "\n".join([f"
{val}" for idx, val in enumerate(ticket_ids)])
+ user_display_name = self.get_user_ldap_details(user_name=user)
+ body = f"""
+
Hi {user_display_name},
+
You have the following Approved JIRA Ticket-Ids
+
+ Currently, there are several instances running over budget, kindly review and tag instances with TicketId: #
+
Please find the below attached document.
+
+ {self.FOOTER}
+ """
+ return subject, body
+
+ def cro_send_closed_alert(self, user: str, ticket_id: str):
+ """
+ This method sends the CRO ticket close alert
+ :param user:
+ :param ticket_id:
+ :return:
+ """
+ subject = 'Closing Cloud Budget Request ticket'
+ ticket_id = ticket_id.split('-')[-1]
+ user_full_name = self.get_user_ldap_details(user_name=user)
+ body = f"""
+
Hi {user_full_name},
+
+ Your cloud budget request ( TicketId: {ticket_id} ) duration expired and the ticket auto closed.
+ You can find the summary in
Portal.
+
+ {self.FOOTER}
+ """
+ return subject, body
+
+ def filter_resources_on_days(self, resources: dict):
+ """
+ This method returns the resources grouped by days
+ :param resources:
+ :param days:
+ :return:
+ """
+ resources_by_days = {}
+ for policy_name, resource_data in resources.items():
+ for region_name, policy_region_data in resource_data.items():
+ for data_item in policy_region_data:
+ resources_by_days.setdefault(data_item.get('Days'), []).append(data_item)
+ return resources_by_days
+
+ def get_data_in_html_table_format(self, resources: dict):
+ """
+ This method returns user policy alerts in HTML table format
+ :param resources:
+ :return:
+ """
+ style = """
+
+ """
+ html_table_format = f"""{style}
"""
+ thead_values = ['Policy', 'Region', 'ResourceId', 'Name', 'Action', 'DeletedDay']
+ th_elements = ''.join([f'{value} | ' for value in thead_values])
+ html_table_format += f'{th_elements}
'
+ for days, resource_data in resources.items():
+ resource_data = sorted(resource_data, key=lambda item: (item.get('Policy'), item.get('Region')))
+ for resource in resource_data:
+ html_table_format += ''
+ for th_value in thead_values:
+ if 'Deleted' == resource.get(th_value):
+ html_table_format += f"{resource.get(th_value)} 🗑 | "
+ else:
+ html_table_format += f"""{resource.get(th_value)} | """
+ html_table_format += '
'
+ html_table_format += '
'
+ return html_table_format
+
+ def get_agg_policies_mail_message(self, user: str, user_resources: dict):
+ """
+ This method returns the message for the aggregated alert of all policies
+ :param user:
+ :param user_resources:
+ :return:
+ """
+ display_name = self.get_user_ldap_details(user_name=user)
+ resources_by_days = self.filter_resources_on_days(resources=user_resources)
+ table_data = self.get_data_in_html_table_format(resources=resources_by_days)
+ display_name = display_name if display_name else user
+ subject = f'Cloud Governance: Policy Alerts'
+ body = f"""
+
+
+
You can find below your unused resources in the {self.__public_cloud_name} account ({self.account}).
+
If you want to keep them, please add "Policy=Not_Delete" or "Policy=skip" tag for each resource
+ {table_data}
+
+
{self.RESTRICTION}
+ {self.FOOTER}
+"""
+ return subject, body
+
+ def cro_monitor_budget_remain_alert(self, ticket_id: str, budget: int, user: str, used_budget: int, remain_budget: int):
+ """
+ This method returns subject, body for the budget remain alert
+ :param ticket_id:
+ :param budget:
+ :param user:
+ :param used_budget:
+ :param remain_budget:
+ :return:
+ """
+ ticket_id = ticket_id.split('-')[-1]
+ subject = f'[Action required] Cloud Resources Budget Remain'
+ user_display_name = self.get_user_ldap_details(user_name=user)
+ template_loader = self.env_loader.get_template('cro_monitor_budget_remain_alert.j2')
+ context = {'name': user_display_name, 'ticket_id': ticket_id, 'portal': self.__portal,
+ 'budget': budget, 'used_budget': used_budget, 'remain_budget': remain_budget, 'footer': self.FOOTER}
+ body = template_loader.render(context)
+ return subject, body
+
+ def cro_monitor_budget_remain_high_alert(self, ticket_id: str, budget: int, user: str, used_budget: int, remain_budget: int):
+ """
+ This method returns subject, body for the budget completed high alert
+ :param ticket_id:
+ :param budget:
+ :param user:
+ :param used_budget:
+ :param remain_budget:
+ :return:
+ """
+ ticket_id = ticket_id.split('-')[-1]
+ subject = f'[Action required] Cloud Resources Budget Remain'
+ user_display_name = self.get_user_ldap_details(user_name=user)
+ template_loader = self.env_loader.get_template('cro_monitor_budget_remain_high_alert.j2')
+ context = {'name': user_display_name, 'ticket_id': ticket_id, 'portal': self.__portal,
+ 'budget': budget, 'used_budget': used_budget, 'remain_budget': remain_budget,
+ 'footer': self.FOOTER}
+ body = template_loader.render(context)
return subject, body
diff --git a/cloud_governance/common/mails/postfix.py b/cloud_governance/common/mails/postfix.py
index 266b9d3d..4cc9191c 100644
--- a/cloud_governance/common/mails/postfix.py
+++ b/cloud_governance/common/mails/postfix.py
@@ -34,6 +34,10 @@ def __init__(self):
self.__es_port = self.__environment_variables_dict.get('es_port', '')
self.__account = self.__environment_variables_dict.get('account', '')
self.__policy_output = self.__environment_variables_dict.get('policy_output', '')
+ self.__default_admins = self.__environment_variables_dict.get('DEFAULT_ADMINS')
+ self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT')
+ self.__mail_to = self.__environment_variables_dict.get('EMAIL_TO') # testing purposes
+ self.__mail_cc = self.__environment_variables_dict.get('EMAIL_CC')
self.bucket_name, self.key = self.get_bucket_name()
self.__es_index = 'cloud-governance-mail-messages'
if self.__es_host:
@@ -53,62 +57,72 @@ def get_bucket_name(self):
@logger_time_stamp
def send_email_postfix(self, subject: str, to: any, cc: list, content: str, **kwargs):
- msg = MIMEMultipart('alternative')
- msg["Subject"] = subject
- msg["From"] = "%s <%s>" % (
- 'cloud-governance',
- "@".join(["noreply-cloud-governance", 'redhat.com']),
- )
- if isinstance(to, str):
- msg["To"] = "@".join([to, 'redhat.com'])
- elif isinstance(to, list):
- msg["To"] = ", ".join(to)
- msg["Cc"] = ",".join(cc)
- # msg.add_header("Reply-To", self.reply_to)
- # msg.add_header("User-Agent", self.reply_to)
- if kwargs.get('filename'):
- attachment = MIMEText(open(kwargs['filename']).read())
- attachment.add_header('Content-Disposition', 'attachment',
- filename=kwargs['filename'].split('/')[-1])
- msg.attach(attachment)
- if kwargs.get('mime_type'):
- msg.attach(MIMEText(content, kwargs.get('mime_type')))
- else:
- msg.attach(MIMEText(content))
- email_string = msg.as_string()
- email_host = 'localhost'
- try:
- with smtplib.SMTP(email_host) as s:
- try:
- logger.debug(email_string)
- s.send_message(msg)
- if isinstance(to, str):
- logger.info(f'Mail sent successfully to {to}@redhat.com')
- elif isinstance(to, list):
- logger.info(f'Mail sent successfully to {", ".join(to)}@redhat.com')
- if kwargs.get('filename'):
- file_name = kwargs['filename'].split('/')[-1]
- date_key = datetime.datetime.now().strftime("%Y%m%d%H")
- if self.__policy_output:
- self.__s3_operations.upload_file(file_name_path=kwargs['filename'],
- bucket=self.bucket_name, key=f'{self.key}/{self.__policy}/{date_key}',
- upload_file=file_name)
- s3_path = f'{self.__policy_output}/logs/{self.__policy}/{date_key}/{file_name}'
- content += f'\n\nresource_file_path: s3://{s3_path}\n\n'
- data = {'Policy': self.__policy, 'To': to, 'Cc': cc, 'Message': content, 'Account': self.__account.upper(), 'MessageType': kwargs.get('message_type')}
- if kwargs.get('resource_id'):
- data['resource_id'] = kwargs['resource_id']
- if kwargs.get('extra_purse'):
- data['extra_purse'] = round(kwargs['extra_purse'], 3)
- if self.__es_host:
- self.__es_operations.upload_to_elasticsearch(data=data, index=self.__es_index)
- logger.info(f'Uploaded to es index: {self.__es_index}')
- else:
- logger.info('Error missing the es_host')
- except smtplib.SMTPException as ex:
- logger.info(f'Error while sending mail, {ex}')
- return False
- return True
- except Exception as err:
- logger.info(f'Some error occurred, {err}')
- return False
+ if self.__email_alert:
+ if self.__mail_to:
+ to = self.__mail_to
+ if self.__mail_cc:
+ cc = self.__mail_cc
+ cc = [cc_user for cc_user in cc if to and to not in cc_user]
+ cc = [cc_user if '@redhat.com' in cc_user else f'{cc_user}@redhat.com' for cc_user in cc]
+ msg = MIMEMultipart('alternative')
+ msg["Subject"] = subject
+ msg["From"] = "%s <%s>" % (
+ 'cloud-governance',
+ "@".join(["noreply-cloud-governance", 'redhat.com']),
+ )
+ if isinstance(to, str):
+ msg["To"] = "@".join([to, 'redhat.com'])
+ elif isinstance(to, list):
+ msg["To"] = ", ".join(to)
+ msg["Cc"] = ",".join(cc)
+ # msg.add_header("Reply-To", self.reply_to)
+ # msg.add_header("User-Agent", self.reply_to)
+ if kwargs.get('filename'):
+ attachment = MIMEText(open(kwargs['filename']).read())
+ attachment.add_header('Content-Disposition', 'attachment',
+ filename=kwargs['filename'].split('/')[-1])
+ msg.attach(attachment)
+ if kwargs.get('mime_type'):
+ msg.attach(MIMEText(content, kwargs.get('mime_type')))
+ else:
+ msg.attach(MIMEText(content))
+ email_string = msg.as_string()
+ email_host = 'localhost'
+ try:
+ with smtplib.SMTP(email_host) as s:
+ try:
+ logger.debug(email_string)
+ s.send_message(msg)
+ if isinstance(to, str):
+ logger.warn(f'Mail sent successfully to {to}@redhat.com')
+ elif isinstance(to, list):
+ logger.warn(f'Mail sent successfully to {", ".join(to)}@redhat.com')
+ if kwargs.get('filename'):
+ file_name = kwargs['filename'].split('/')[-1]
+ date_key = datetime.datetime.now().strftime("%Y%m%d%H")
+ if self.__policy_output:
+ self.__s3_operations.upload_file(file_name_path=kwargs['filename'],
+ bucket=self.bucket_name, key=f'{self.key}/{self.__policy}/{date_key}',
+ upload_file=file_name)
+ s3_path = f'{self.__policy_output}/logs/{self.__policy}/{date_key}/{file_name}'
+ content += f'\n\nresource_file_path: s3://{s3_path}\n\n'
+ es_data = kwargs.get('es_data')
+ data = {'Policy': self.__policy, 'To': to, 'Cc': cc, 'Message': content, 'Account': self.__account.upper(), 'MessageType': kwargs.get('message_type', 'alert')}
+ if es_data:
+ data.update(es_data)
+ if kwargs.get('resource_id'):
+ data['resource_id'] = kwargs['resource_id']
+ if kwargs.get('extra_purse'):
+ data['extra_purse'] = round(kwargs['extra_purse'], 3)
+ if self.__es_host:
+ self.__es_operations.upload_to_elasticsearch(data=data, index=self.__es_index)
+ logger.warn(f'Uploaded to es index: {self.__es_index}')
+ else:
+ logger.warn('Error missing the es_host')
+ except smtplib.SMTPException as ex:
+ logger.error(f'Error while sending mail, {ex}')
+ return False
+ return True
+ except Exception as err:
+ logger.error(f'Some error occurred, {err}')
+ return False
diff --git a/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2 b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2
new file mode 100644
index 00000000..6967b1a5
--- /dev/null
+++ b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_alert.j2
@@ -0,0 +1,12 @@
+
+
+
You completed {{used_budget}} / {{budget}} of budget till today of the TicketId: {{ticket_id}}.
+ Remaining budget will be {{remain_budget}}.
+
Open the budget extension if further needed.
+
This ticket will be closed if the budget exceeds 110%.
+
You can extend the budget here.
+
Visit the wiki page to get more information
+
+{{footer}}
diff --git a/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2 b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2
new file mode 100644
index 00000000..06cf2176
--- /dev/null
+++ b/cloud_governance/common/mails/templates/cro_monitor_budget_remain_high_alert.j2
@@ -0,0 +1,11 @@
+
+
+
You completed {{used_budget}} / {{budget}} of budget till today of the TicketId: {{ticket_id}}.
+ You have exceeded the budget by {{-1*remain_budget}}.
+
Open the budget extension if further needed.
+
You can extend the budget here.
+
Visit the wiki page to get more information
+
+{{footer}}
diff --git a/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2 b/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2
new file mode 100644
index 00000000..bfc54109
--- /dev/null
+++ b/cloud_governance/common/mails/templates/cro_request_for_manager_approval.j2
@@ -0,0 +1,18 @@
+
Hi {{manager_full_name}},
+{%if extra_message %}
{{extra_message}}
{%endif%}
+
+ {{user_full_name}} is waiting for your project cloud budget approval
+ Please approve the request in the following url
{{portal}}
+
+
+ Description of the New Request
+
+ {% for key, value in description.items() %}
+ {{key}}: {{value}}
+ {% endfor %}
+
+ If you are not able to approve from the site,
+ please click here to approve by mail:
Approve request or
+
Reject Request.
+ Note: It is highly recommended to use the site for approving requests
+{{footer}}
diff --git a/cloud_governance/common/pandas/__init__.py b/cloud_governance/common/pandas/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloud_governance/common/pandas/pandas_operations.py b/cloud_governance/common/pandas/pandas_operations.py
new file mode 100644
index 00000000..1559b267
--- /dev/null
+++ b/cloud_governance/common/pandas/pandas_operations.py
@@ -0,0 +1,55 @@
+import tempfile
+
+import pandas as pd
+import typeguard
+
+from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+
+
+class PandasOperations:
+ """
+ This class performs the pandas operations
+ """
+ CHUNK_SIZE = 5000
+
+ def __init__(self, region_name: str = 'us-east-1'):
+ self.__s3_operations = S3Operations(region_name=region_name)
+
+ @typeguard.typechecked
+ @logger_time_stamp
+ def get_dataframe_from_csv_file(self, file_path: str):
+ """
+ This method returns the pandas dataframe from the csv file
+ :param file_path:
+ :return:
+ """
+ dataframes = []
+ for data_chunk in pd.read_csv(filepath_or_buffer=file_path, chunksize=self.CHUNK_SIZE):
+ dataframes.append(data_chunk)
+ dataframe = pd.concat(dataframes, ignore_index=True)
+ return dataframe
+
+ @typeguard.typechecked
+ @logger_time_stamp
+ def get_dataframe_from_s3_file(self, bucket: str, key: str, download_file: str):
+ """
+ This method returns the pandas dataframe from the s3 file
+ :return:
+ """
+ if not self.__s3_operations.file_exist(bucket=bucket, key=key, file_name=download_file):
+ raise FileNotFoundError(f"{key}/{download_file} path is not exists else verify your credentials")
+ with tempfile.NamedTemporaryFile(suffix='.csv', mode='w') as file_name:
+ self.__s3_operations.download_file(bucket=bucket, key=key, download_file=download_file,
+ file_name_path=file_name.name)
+ return self.get_dataframe_from_csv_file(file_path=file_name.name)
+
+ @typeguard.typechecked
+ @logger_time_stamp
+ def get_data_dictonary_from_dataframe(self, dataframe: pd.DataFrame):
+ """
+ This method returns the dataframe converted to a list of dictionary records
+ :param dataframe:
+ :return:
+ """
+ return dataframe.to_dict(orient='records')
diff --git a/cloud_governance/main/environment_variables.py b/cloud_governance/main/environment_variables.py
index 6564f5e8..a50d8ef6 100644
--- a/cloud_governance/main/environment_variables.py
+++ b/cloud_governance/main/environment_variables.py
@@ -1,6 +1,12 @@
+import argparse
import os
-from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations
+import tempfile
+from ast import literal_eval
+
+import boto3
+
+from cloud_governance.main.environment_variables_exceptions import ParseFailed
class EnvironmentVariables:
@@ -31,24 +37,30 @@ def __init__(self):
##################################################################################################
# dynamic parameters - configure for local run
# parameters for running policies
- self._environment_variables_dict['account'] = EnvironmentVariables.get_env('account', '').upper()
+ self._environment_variables_dict['account'] = EnvironmentVariables.get_env('account', '').upper().strip()
self._environment_variables_dict['AWS_DEFAULT_REGION'] = EnvironmentVariables.get_env('AWS_DEFAULT_REGION', '')
-
+ self._environment_variables_dict['log_level'] = EnvironmentVariables.get_env('log_level', 'INFO')
+ self._environment_variables_dict['PRINT_LOGS'] = EnvironmentVariables.get_boolean_from_environment('PRINT_LOGS', True)
+ if not self._environment_variables_dict['AWS_DEFAULT_REGION']:
+ self._environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2'
+ self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = EnvironmentVariables.get_env('PUBLIC_CLOUD_NAME', 'AWS')
if EnvironmentVariables.get_env('AWS_ACCESS_KEY_ID', '') and EnvironmentVariables.get_env('AWS_SECRET_ACCESS_KEY', ''):
- self.iam_operations = IAMOperations()
- self._environment_variables_dict['account'] = self.iam_operations.get_account_alias_cloud_name()[0].upper()
-
+ self._environment_variables_dict['account'] = self.get_aws_account_alias_name().upper().replace('OPENSHIFT-', '')
self._environment_variables_dict['policy'] = EnvironmentVariables.get_env('policy', '')
self._environment_variables_dict['aws_non_cluster_policies'] = ['ec2_idle', 'ec2_stop', 'ec2_run', 'ebs_in_use',
'ebs_unattached', 's3_inactive',
'empty_roles', 'ip_unattached',
- 'nat_gateway_unused',
+ 'unused_nat_gateway',
'zombie_snapshots', 'skipped_resources',
'monthly_report']
- self._environment_variables_dict['cost_policies'] = ['cost_explorer', 'cost_over_usage', 'cost_billing_reports', 'cost_explorer_payer_billings']
+ es_index = 'cloud-governance-policy-es-index'
+ self._environment_variables_dict['cost_policies'] = ['cost_explorer', 'cost_over_usage', 'cost_billing_reports',
+ 'cost_explorer_payer_billings', 'spot_savings_analysis']
self._environment_variables_dict['ibm_policies'] = ['tag_baremetal', 'tag_vm', 'ibm_cost_report',
'ibm_cost_over_usage']
+ if self._environment_variables_dict['policy'] in self._environment_variables_dict['cost_policies']:
+ es_index = 'cloud-governance-global-cost-billing-index'
# AWS env vars
self._environment_variables_dict['resource_name'] = EnvironmentVariables.get_env('resource_name', '')
@@ -64,6 +76,7 @@ def __init__(self):
self._environment_variables_dict['service_type'] = EnvironmentVariables.get_env('service_type', '')
self._environment_variables_dict['TABLE_NAME'] = EnvironmentVariables.get_env('TABLE_NAME', '')
self._environment_variables_dict['REPLACE_ACCOUNT_NAME'] = EnvironmentVariables.get_env('REPLACE_ACCOUNT_NAME', '{}')
+ self._environment_variables_dict['DAYS_TO_DELETE_RESOURCE'] = int(EnvironmentVariables.get_env('DAYS_TO_DELETE_RESOURCE', '7'))
# AWS Cost Explorer tags
self._environment_variables_dict['cost_metric'] = EnvironmentVariables.get_env('cost_metric', 'UnblendedCost')
@@ -71,6 +84,7 @@ def __init__(self):
self._environment_variables_dict['end_date'] = EnvironmentVariables.get_env('end_date', '')
self._environment_variables_dict['granularity'] = EnvironmentVariables.get_env('granularity', 'DAILY')
self._environment_variables_dict['cost_explorer_tags'] = EnvironmentVariables.get_env('cost_explorer_tags', '{}')
+ self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = EnvironmentVariables.get_env('PUBLIC_CLOUD_NAME', 'AWS')
# AZURE Credentials
self._environment_variables_dict['AZURE_ACCOUNT_ID'] = EnvironmentVariables.get_env('AZURE_ACCOUNT_ID', '')
@@ -80,7 +94,7 @@ def __init__(self):
if self._environment_variables_dict['AZURE_CLIENT_ID'] and self._environment_variables_dict['AZURE_TENANT_ID']\
and self._environment_variables_dict['AZURE_CLIENT_SECRET']:
self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = 'AZURE'
- self._environment_variables_dict['TOTAL_ACCOUNTS'] = bool(EnvironmentVariables.get_env('TOTAL_ACCOUNTS', ''))
+ self._environment_variables_dict['TOTAL_ACCOUNTS'] = EnvironmentVariables.get_boolean_from_environment('TOTAL_ACCOUNTS', False)
# IBM env vars
self._environment_variables_dict['IBM_ACCOUNT_ID'] = EnvironmentVariables.get_env('IBM_ACCOUNT_ID', '')
@@ -98,7 +112,7 @@ def __init__(self):
# Common env vars
self._environment_variables_dict['dry_run'] = EnvironmentVariables.get_env('dry_run', 'yes')
- self._environment_variables_dict['FORCE_DELETE'] = EnvironmentVariables.get_env('FORCE_DELETE', False)
+ self._environment_variables_dict['FORCE_DELETE'] = EnvironmentVariables.get_boolean_from_environment('FORCE_DELETE', False)
self._environment_variables_dict['policy_output'] = EnvironmentVariables.get_env('policy_output', '')
self._environment_variables_dict['bucket'] = EnvironmentVariables.get_env('bucket', '')
self._environment_variables_dict['file_path'] = EnvironmentVariables.get_env('file_path', '')
@@ -109,7 +123,7 @@ def __init__(self):
self._environment_variables_dict['upload_data_es'] = EnvironmentVariables.get_env('upload_data_es', '')
self._environment_variables_dict['es_host'] = EnvironmentVariables.get_env('es_host', '')
self._environment_variables_dict['es_port'] = EnvironmentVariables.get_env('es_port', '')
- self._environment_variables_dict['es_index'] = EnvironmentVariables.get_env('es_index', '')
+ self._environment_variables_dict['es_index'] = EnvironmentVariables.get_env('es_index', es_index)
self._environment_variables_dict['es_doc_type'] = EnvironmentVariables.get_env('es_doc_type', '')
self._environment_variables_dict['ES_TIMEOUT'] = EnvironmentVariables.get_env('ES_TIMEOUT', 2000)
@@ -137,6 +151,8 @@ def __init__(self):
# AWS Top Acconut
self._environment_variables_dict['AWS_ACCOUNT_ROLE'] = EnvironmentVariables.get_env('AWS_ACCOUNT_ROLE', '')
+ self._environment_variables_dict['PAYER_SUPPORT_FEE_CREDIT'] = EnvironmentVariables.get_env('PAYER_SUPPORT_FEE_CREDIT', 0)
+ self._environment_variables_dict['TEMPORARY_DIR'] = EnvironmentVariables.get_env('TEMPORARY_DIR', '/tmp')
self._environment_variables_dict['COST_CENTER_OWNER'] = EnvironmentVariables.get_env('COST_CENTER_OWNER', '{}')
# Jira env parameters
@@ -147,13 +163,112 @@ def __init__(self):
self._environment_variables_dict['JIRA_PASSWORD'] = EnvironmentVariables.get_env('JIRA_PASSWORD', '')
# Cloud Resource Orchestration
+ self._environment_variables_dict['CRO_PORTAL'] = EnvironmentVariables.get_env('CRO_PORTAL', '')
self._environment_variables_dict['CLOUD_NAME'] = EnvironmentVariables.get_env('CLOUD_NAME', '')
self._environment_variables_dict['MONITOR'] = EnvironmentVariables.get_env('MONITOR', '')
- self._environment_variables_dict['MANAGEMENT'] = bool(EnvironmentVariables.get_env('MANAGEMENT', False))
+ self._environment_variables_dict['MANAGEMENT'] = EnvironmentVariables.get_boolean_from_environment('MANAGEMENT', False)
+
+ # GCP Account
+ self._environment_variables_dict['GCP_DATABASE_NAME'] = EnvironmentVariables.get_env('GCP_DATABASE_NAME')
+ self._environment_variables_dict['GCP_DATABASE_TABLE_NAME'] = EnvironmentVariables.get_env('GCP_DATABASE_TABLE_NAME')
+ if self._environment_variables_dict.get('GCP_DATABASE_TABLE_NAME'):
+ self._environment_variables_dict['PUBLIC_CLOUD_NAME'] = 'GCP'
+
+ self._environment_variables_dict['EMAIL_ALERT'] = EnvironmentVariables.get_boolean_from_environment('EMAIL_ALERT', True)
+ self._environment_variables_dict['MANAGER_EMAIL_ALERT'] = EnvironmentVariables.get_boolean_from_environment('MANAGER_EMAIL_ALERT', True)
+ self._environment_variables_dict['UPDATE_TAG_BULKS'] = int(EnvironmentVariables.get_env('UPDATE_TAG_BULKS', '20'))
+
+ # policies aggregate alert
+ self._environment_variables_dict['BUCKET_NAME'] = EnvironmentVariables.get_env('BUCKET_NAME')
+ self._environment_variables_dict['BUCKET_KEY'] = EnvironmentVariables.get_env('BUCKET_KEY')
+ self._environment_variables_dict['MAIL_ALERT_DAYS'] = literal_eval(EnvironmentVariables.get_env('MAIL_ALERT_DAYS', '[]'))
+ self._environment_variables_dict['POLICY_ACTIONS_DAYS'] = literal_eval(EnvironmentVariables.get_env('POLICY_ACTIONS_DAYS', '[]'))
+ self._environment_variables_dict['DEFAULT_ADMINS'] = literal_eval(EnvironmentVariables.get_env('DEFAULT_ADMINS', '[]'))
+ self._environment_variables_dict['KERBEROS_USERS'] = literal_eval(EnvironmentVariables.get_env('KERBEROS_USERS', '[]'))
+ self._environment_variables_dict['POLICIES_TO_ALERT'] = literal_eval(EnvironmentVariables.get_env('POLICIES_TO_ALERT', '[]'))
+ if self._environment_variables_dict.get('policy') in ['send_aggregated_alerts']:
+ self._environment_variables_dict['COMMON_POLICIES'] = True
+ # CRO -- Cloud Resource Orch
+ self._environment_variables_dict['CLOUD_RESOURCE_ORCHESTRATION'] = EnvironmentVariables.get_boolean_from_environment('CLOUD_RESOURCE_ORCHESTRATION', False)
+ self._environment_variables_dict['USER_COST_INDEX'] = EnvironmentVariables.get_env('USER_COST_INDEX', '')
+ self._environment_variables_dict['CRO_ES_INDEX'] = EnvironmentVariables.get_env('CRO_ES_INDEX', 'cloud-governance-resource-orchestration')
+ self._environment_variables_dict['CRO_COST_OVER_USAGE'] = int(EnvironmentVariables.get_env('CRO_COST_OVER_USAGE', '500'))
+ self._environment_variables_dict['CRO_DEFAULT_ADMINS'] = literal_eval(EnvironmentVariables.get_env('CRO_DEFAULT_ADMINS', "[]"))
+ self._environment_variables_dict['CRO_DURATION_DAYS'] = int(EnvironmentVariables.get_env('CRO_DURATION_DAYS', '30'))
+ self._environment_variables_dict['RUN_ACTIVE_REGIONS'] = EnvironmentVariables.get_boolean_from_environment('RUN_ACTIVE_REGIONS', False)
+ self._environment_variables_dict['CRO_RESOURCE_TAG_NAME'] = EnvironmentVariables.get_env('CRO_RESOURCE_TAG_NAME', 'TicketId')
+ self._environment_variables_dict['CRO_REPLACED_USERNAMES'] = literal_eval(EnvironmentVariables.get_env('CRO_REPLACED_USERNAMES', "['osdCcsAdmin']"))
+ self._environment_variables_dict['CE_PAYER_INDEX'] = EnvironmentVariables.get_env('CE_PAYER_INDEX', '')
+ self._environment_variables_dict['EMAIL_TO'] = EnvironmentVariables.get_env('EMAIL_TO', '')
+ self._environment_variables_dict['EMAIL_CC'] = literal_eval(EnvironmentVariables.get_env('EMAIL_CC', "[]"))
+ self._environment_variables_dict['MANAGER_ESCALATION_DAYS'] = int(EnvironmentVariables.get_env('MANAGER_ESCALATION_DAYS', '3'))
+ self._environment_variables_dict['GLOBAL_CLOUD_ADMIN'] = EnvironmentVariables.get_env('GLOBAL_CLOUD_ADMIN', 'natashba')
+ self._environment_variables_dict['TICKET_OVER_USAGE_LIMIT'] = int(EnvironmentVariables.get_env('TICKET_OVER_USAGE_LIMIT', '80'))
+
+ # AWS Athena
+ self._environment_variables_dict['S3_RESULTS_PATH'] = EnvironmentVariables.get_env('S3_RESULTS_PATH', '')
+ self._environment_variables_dict['DEFAULT_ROUND_DIGITS'] = \
+ int(EnvironmentVariables.get_env('DEFAULT_ROUND_DIGITS', '3'))
+ self._environment_variables_dict['ATHENA_DATABASE_NAME'] = EnvironmentVariables.get_env('ATHENA_DATABASE_NAME', '')
+ self._environment_variables_dict['ATHENA_TABLE_NAME'] = EnvironmentVariables.get_env('ATHENA_TABLE_NAME', '')
+
+
+
@staticmethod
- def get_env(var: str, defval: any = ''):
- return os.environ.get(var, defval)
+ def to_bool(arg, def_val: bool = None):
+ if isinstance(arg, bool):
+ return arg
+ if isinstance(arg, (int, float)):
+ return arg != 0
+ if isinstance(arg, str):
+ arg = arg.lower()
+ if arg == 'true' or arg == 'yes':
+ return True
+ elif arg == 'false' or arg == 'no':
+ return False
+ try:
+ arg1 = int(arg)
+ return arg1 != 0
+ except Exception:
+ pass
+ if def_val is not None:
+ return def_val
+ raise ParseFailed(f'Cannot parse {arg} as a boolean value')
+
+ def get_aws_account_alias_name(self):
+ """
+ This method returns the AWS account alias name
+ :return:
+ """
+ iam_client = boto3.client('iam')
+ try:
+ account_alias = iam_client.list_account_aliases()['AccountAliases']
+ if account_alias:
+ return account_alias[0].upper()
+ except:
+ return os.environ.get('account', '').upper()
+
+
+ @staticmethod
+ def get_env(var: str, defval=''):
+ lcvar = var.lower()
+ dashvar = lcvar.replace('_', '-')
+ parser = argparse.ArgumentParser(description='Run CloudGovernance', allow_abbrev=False)
+ if lcvar == dashvar:
+ parser.add_argument(f"--{lcvar}", default=os.environ.get(var, defval), type=str, metavar='String', help=var)
+ else:
+ parser.add_argument(f"--{lcvar}", f"--{dashvar}", default=os.environ.get(var, defval), type=str,
+ metavar='String', help=var)
+ args, ignore = parser.parse_known_args()
+ if hasattr(args, lcvar):
+ return getattr(args, lcvar)
+ else:
+ return os.environ.get(var, defval)
+
+ @staticmethod
+ def get_boolean_from_environment(var: str, defval: bool):
+ return EnvironmentVariables.to_bool(EnvironmentVariables.get_env(var), defval)
@property
def environment_variables_dict(self):
@@ -164,7 +279,6 @@ def environment_variables_dict(self):
environment_variables = EnvironmentVariables()
-
# env vars examples
# os.environ['AWS_DEFAULT_REGION'] = 'us-east-2'
# os.environ['AWS_DEFAULT_REGION'] = 'all'
diff --git a/cloud_governance/main/main.py b/cloud_governance/main/main.py
index 61b92c2d..ca5bd962 100644
--- a/cloud_governance/main/main.py
+++ b/cloud_governance/main/main.py
@@ -4,6 +4,8 @@
import boto3 # regions
from cloud_governance.cloud_resource_orchestration.monitor.cloud_monitor import CloudMonitor
+from cloud_governance.main.main_common_operations import run_common_policies
+from cloud_governance.main.run_cloud_resource_orchestration import run_cloud_resource_orchestration
from cloud_governance.policy.policy_operations.aws.cost_expenditure.cost_report_policies import CostReportPolicies
from cloud_governance.policy.policy_operations.azure.azure_policy_runner import AzurePolicyRunner
from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp, logger
@@ -13,6 +15,7 @@
remove_tag_non_cluster_resource, tag_na_resources
from cloud_governance.policy.policy_operations.aws.tag_user.run_tag_iam_user import tag_iam_user, run_validate_iam_user_tags
from cloud_governance.policy.policy_operations.aws.zombie_cluster.run_zombie_cluster_resources import zombie_cluster_resource
+from cloud_governance.policy.policy_operations.gcp.gcp_policy_runner import GcpPolicyRunner
from cloud_governance.policy.policy_operations.gitleaks.gitleaks import GitLeaks
from cloud_governance.policy.policy_operations.ibm.ibm_operations.ibm_policy_runner import IBMPolicyRunner
from cloud_governance.main.environment_variables import environment_variables
@@ -167,12 +170,6 @@ def run_policy(account: str, policy: str, region: str, dry_run: str):
raise Exception(f'Missing Policy name: {policy}')
-@logger_time_stamp
-def run_cloud_management():
- """This method run the cloud management"""
- return CloudMonitor().run()
-
-
@logger_time_stamp
def main():
"""
@@ -194,110 +191,124 @@ def main():
es_doc_type = environment_variables_dict.get('es_doc_type', '')
bucket = environment_variables_dict.get('bucket', '')
- non_cluster_polices_runner = None
- is_non_cluster_polices_runner = policy in environment_variables_dict.get('aws_non_cluster_policies')
- if is_non_cluster_polices_runner:
- non_cluster_polices_runner = ZombieNonClusterPolicies()
+ if environment_variables_dict.get('COMMON_POLICIES'):
+ run_common_policies()
+ elif environment_variables_dict.get('CLOUD_RESOURCE_ORCHESTRATION'):
+ run_cloud_resource_orchestration()
+ else:
+ non_cluster_polices_runner = None
+ is_non_cluster_polices_runner = policy in environment_variables_dict.get('aws_non_cluster_policies')
+ if is_non_cluster_polices_runner:
+ non_cluster_polices_runner = ZombieNonClusterPolicies()
- ibm_classic_infrastructure_policy_runner = None
- is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('ibm_policies')
- if not is_tag_ibm_classic_infrastructure_runner:
- if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'IBM':
- is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('cost_policies')
- if is_tag_ibm_classic_infrastructure_runner:
- ibm_classic_infrastructure_policy_runner = IBMPolicyRunner()
+ ibm_classic_infrastructure_policy_runner = None
+ is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('ibm_policies')
+ if not is_tag_ibm_classic_infrastructure_runner:
+ if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'IBM':
+ is_tag_ibm_classic_infrastructure_runner = policy in environment_variables_dict.get('cost_policies')
+ if is_tag_ibm_classic_infrastructure_runner:
+ ibm_classic_infrastructure_policy_runner = IBMPolicyRunner()
- is_cost_explorer_policies_runner = ''
- if not environment_variables_dict.get('PUBLIC_CLOUD_NAME'):
- cost_explorer_policies_runner = None
- is_cost_explorer_policies_runner = policy in environment_variables_dict.get('cost_policies')
- if is_cost_explorer_policies_runner:
- cost_explorer_policies_runner = CostReportPolicies()
+ is_cost_explorer_policies_runner = ''
+ if environment_variables_dict.get('PUBLIC_CLOUD_NAME') == 'AWS':
+ cost_explorer_policies_runner = None
+ is_cost_explorer_policies_runner = policy in environment_variables_dict.get('cost_policies')
+ if is_cost_explorer_policies_runner:
+ cost_explorer_policies_runner = CostReportPolicies()
- is_azure_policy_runner = ''
- if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'AZURE':
- azure_cost_policy_runner = None
- is_azure_policy_runner = policy in environment_variables_dict.get('cost_policies')
- if is_azure_policy_runner:
- azure_cost_policy_runner = AzurePolicyRunner()
+ is_azure_policy_runner = ''
+ if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'AZURE':
+ azure_cost_policy_runner = None
+ is_azure_policy_runner = policy in environment_variables_dict.get('cost_policies')
+ if is_azure_policy_runner:
+ azure_cost_policy_runner = AzurePolicyRunner()
- # cloud_resource_orchestration lon_run/short_run
- is_cloud_management = False
- if environment_variables_dict.get('MANAGEMENT'):
- is_cloud_management = True
+ is_gcp_policy_runner = ''
+ if environment_variables_dict.get('PUBLIC_CLOUD_NAME') and environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper() == 'GCP':
+ gcp_cost_policy_runner = None
+ is_gcp_policy_runner = policy in environment_variables_dict.get('cost_policies')
+ if is_gcp_policy_runner:
+ gcp_cost_policy_runner = GcpPolicyRunner()
- @logger_time_stamp
- def run_non_cluster_polices_runner():
- """
- This method run the aws non-cluster policies
- @return:
- """
- non_cluster_polices_runner.run()
+ @logger_time_stamp
+ def run_non_cluster_polices_runner():
+ """
+ This method runs the aws non-cluster policies
+ @return:
+ """
+ non_cluster_polices_runner.run()
- def run_tag_ibm_classic_infrastructure_runner():
- """
- This method run the IBM policies
- @return:
- """
- ibm_classic_infrastructure_policy_runner.run()
+ def run_tag_ibm_classic_infrastructure_runner():
+ """
+ This method runs the IBM policies
+ @return:
+ """
+ ibm_classic_infrastructure_policy_runner.run()
- @logger_time_stamp
- def run_cost_explorer_policies_runner():
- """
- This method run the aws cost_explorer policies
- @return:
- """
- cost_explorer_policies_runner.run()
+ @logger_time_stamp
+ def run_cost_explorer_policies_runner():
+ """
+ This method runs the aws cost_explorer policies
+ @return:
+ """
+ cost_explorer_policies_runner.run()
- @logger_time_stamp
- def run_azure_policy_runner():
- """
- This method run the azure policies
- @return:
- """
- azure_cost_policy_runner.run()
+ @logger_time_stamp
+ def run_azure_policy_runner():
+ """
+ This method runs the azure policies
+ @return:
+ """
+ azure_cost_policy_runner.run()
- # 1. ELK Uploader
- if upload_data_es:
- input_data = {'es_host': es_host,
- 'es_port': int(es_port),
- 'es_index': es_index,
- 'es_doc_type': es_doc_type,
- 'es_add_items': {'account': account},
- 'bucket': bucket,
- 'logs_bucket_key': 'logs',
- 's3_file_name': 'resources.json',
- 'region': region_env,
- 'policy': policy,
- }
- elk_uploader = ESUploader(**input_data)
- elk_uploader.upload_to_es(account=account)
- # 2. POLICY
- elif is_non_cluster_polices_runner:
- run_non_cluster_polices_runner()
- elif is_tag_ibm_classic_infrastructure_runner:
- run_tag_ibm_classic_infrastructure_runner()
- elif is_cost_explorer_policies_runner:
- run_cost_explorer_policies_runner()
- elif is_azure_policy_runner:
- run_azure_policy_runner()
- elif is_cloud_management:
- run_cloud_management()
- else:
- if not policy:
- logger.exception(f'Missing Policy name: "{policy}"')
- raise Exception(f'Missing Policy name: "{policy}"')
- if region_env == 'all':
- # must be set for boto3 client default region
- # environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2'
- ec2 = boto3.client('ec2')
- regions_data = ec2.describe_regions()
- for region in regions_data['Regions']:
- # logger.info(f"region: {region['RegionName']}")
- environment_variables_dict['AWS_DEFAULT_REGION'] = region['RegionName']
- run_policy(account=account, policy=policy, region=region['RegionName'], dry_run=dry_run)
+ @logger_time_stamp
+ def run_gcp_policy_runner():
+ """
+ This method runs the gcp policies
+ """
+ gcp_cost_policy_runner.run()
+
+ # 1. ELK Uploader
+ if upload_data_es:
+ input_data = {'es_host': es_host,
+ 'es_port': int(es_port),
+ 'es_index': es_index,
+ 'es_doc_type': es_doc_type,
+ 'es_add_items': {'account': account},
+ 'bucket': bucket,
+ 'logs_bucket_key': 'logs',
+ 's3_file_name': 'resources.json',
+ 'region': region_env,
+ 'policy': policy,
+ }
+ elk_uploader = ESUploader(**input_data)
+ elk_uploader.upload_to_es(account=account)
+ # 2. POLICY
+ elif is_non_cluster_polices_runner:
+ run_non_cluster_polices_runner()
+ elif is_tag_ibm_classic_infrastructure_runner:
+ run_tag_ibm_classic_infrastructure_runner()
+ elif is_cost_explorer_policies_runner:
+ run_cost_explorer_policies_runner()
+ elif is_azure_policy_runner:
+ run_azure_policy_runner()
+ elif is_gcp_policy_runner:
+ run_gcp_policy_runner()
else:
- run_policy(account=account, policy=policy, region=region_env, dry_run=dry_run)
+ if not policy:
+ logger.exception(f'Missing Policy name: "{policy}"')
+ raise Exception(f'Missing Policy name: "{policy}"')
+ if region_env == 'all':
+ # must be set for boto3 client default region
+ # environment_variables_dict['AWS_DEFAULT_REGION'] = 'us-east-2'
+ ec2 = boto3.client('ec2')
+ regions_data = ec2.describe_regions()
+ for region in regions_data['Regions']:
+ # logger.info(f"region: {region['RegionName']}")
+ environment_variables_dict['AWS_DEFAULT_REGION'] = region['RegionName']
+ run_policy(account=account, policy=policy, region=region['RegionName'], dry_run=dry_run)
+ else:
+ run_policy(account=account, policy=policy, region=region_env, dry_run=dry_run)
main()
diff --git a/cloud_governance/main/main_common_operations.py b/cloud_governance/main/main_common_operations.py
new file mode 100644
index 00000000..eb6a2437
--- /dev/null
+++ b/cloud_governance/main/main_common_operations.py
@@ -0,0 +1,12 @@
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+from cloud_governance.policy.policy_runners.common_policy_runner import CommonPolicyRunner
+
+
+@logger_time_stamp
+def run_common_policies():
+ """
+ This method runs the common policies
+ :return:
+ """
+ common_policy_runner = CommonPolicyRunner()
+ common_policy_runner.run()
diff --git a/cloud_governance/main/run_cloud_resource_orchestration.py b/cloud_governance/main/run_cloud_resource_orchestration.py
new file mode 100644
index 00000000..268b0479
--- /dev/null
+++ b/cloud_governance/main/run_cloud_resource_orchestration.py
@@ -0,0 +1,19 @@
+from tempfile import TemporaryDirectory
+
+from cloud_governance.cloud_resource_orchestration.monitor.cloud_monitor import CloudMonitor
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+from cloud_governance.main.environment_variables import environment_variables
+
+
+@logger_time_stamp
+def run_cloud_management():
+ """This method run the cloud management"""
+ environment_variables_dict = environment_variables.environment_variables_dict
+ with TemporaryDirectory() as cache_temp_dir:
+ environment_variables_dict['TEMPORARY_DIRECTORY'] = cache_temp_dir
+ environment_variables_dict['policy'] = 'cloud_resource_orchestration'
+ return CloudMonitor().run()
+
+
+def run_cloud_resource_orchestration():
+ run_cloud_management()
diff --git a/cloud_governance/policy/aws/cost_billing_reports.py b/cloud_governance/policy/aws/cost_billing_reports.py
index ee15ae6b..a2acb789 100644
--- a/cloud_governance/policy/aws/cost_billing_reports.py
+++ b/cloud_governance/policy/aws/cost_billing_reports.py
@@ -35,13 +35,13 @@ def __init__(self):
self.__gsheet_id = self._environment_variables_dict.get('SPREADSHEET_ID', '')
self.gdrive_operations = GoogleDriveOperations()
self.update_to_gsheet = UploadToGsheet()
- self.cost_center, self.__account_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__account_id)
+ self.cost_center, self.__account_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__account_id)
except:
pass
def __get_start_date(self, end_date: datetime, days: int, operation: operator) -> datetime:
"""
- This method return start_date
+ This method returns start_date
@param operation:
@param end_date:
@param days:
diff --git a/cloud_governance/policy/aws/cost_explorer.py b/cloud_governance/policy/aws/cost_explorer.py
index 21426c2a..f1313dd4 100644
--- a/cloud_governance/policy/aws/cost_explorer.py
+++ b/cloud_governance/policy/aws/cost_explorer.py
@@ -1,4 +1,3 @@
-
from ast import literal_eval
from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations
@@ -16,6 +15,18 @@ class CostExplorer:
"""
BULK_UPLOAD_THREADS = 8
+ SAVINGS_PLAN_FILTER = { # savings plan usage for ce filter
+ 'Dimensions': {
+ 'Key': 'RECORD_TYPE',
+ 'Values': ['SavingsPlanRecurringFee', 'SavingsPlanNegation', 'SavingsPlanCoveredUsage']
+ }
+ }
+ EXCLUDE = 'exclude'
+ INCLUDE = 'include'
+ CE_KEY_RESULTS_BY_TIME = 'ResultsByTime'
+ INDEX_ID = 'index_id'
+ APPEND = 'a'
+ CE_OPERATION = 'Not'
def __init__(self):
super().__init__()
@@ -32,11 +43,12 @@ def __init__(self):
self._elastic_upload = ElasticUpload()
self.__account = self.__environment_variables_dict.get('account').upper().replace('OPENSHIFT-', "").strip()
- def filter_data_by_tag(self, groups: dict, tag: str):
+ def filter_data_by_tag(self, groups: dict, tag: str, savings_plan: str):
"""
This method extract data by tag
@param tag:
@param groups: Data from the cloud explorer
+ @param savings_plan:
@return: converted into dict format
"""
data = {}
@@ -58,13 +70,14 @@ def filter_data_by_tag(self, groups: dict, tag: str):
else:
if 'vm_import_image' in name:
name = 'vm_import_image'
- index_id = f'{start_time.lower()}-{account.lower()}-{tag.lower()}-{name.lower()}'
+ index_id = f'{start_time}-{account}-{tag}-savings-{savings_plan}-{name}'.lower()
if index_id not in data:
- upload_data = {tag: name if tag.upper() == 'ChargeType'.upper() else name.upper(),
- 'Cost': round(float(amount), 3), 'index_id': index_id, 'timestamp': start_time}
+ upload_data = {tag: name if tag.upper() in list(self.__cost_explorer.CE_COST_TYPES) else name.upper(),
+ 'Cost': round(float(amount), 3), self.INDEX_ID: index_id, 'timestamp': start_time, 'savings_plan': savings_plan}
if 'global' in self._elastic_upload.es_index:
if 'Budget' not in upload_data:
upload_data['Budget'] = self._elastic_upload.account
+ upload_data['tag'] = tag.lower()
data[index_id] = upload_data
else:
data[index_id]['Cost'] += round(float(amount), 3)
@@ -77,15 +90,19 @@ def __get_daily_cost_by_tags(self):
"""
data_house = {}
for tag in self.cost_tags:
- if self.start_date and self.end_date:
- response = self.__cost_explorer.get_cost_by_tags(tag=tag, start_date=self.start_date, end_date=self.end_date, granularity=self.granularity, cost_metric=self.cost_metric)
- else:
- response = self.__cost_explorer.get_cost_by_tags(tag=tag, granularity=self.granularity, cost_metric=self.cost_metric)
- results_by_time = response.get('ResultsByTime')
- if results_by_time:
- data_house[tag] = []
- for result in results_by_time:
- data_house[tag].extend(self.filter_data_by_tag(result, tag))
+ for savings_plan in [self.EXCLUDE, self.INCLUDE]:
+ filters = {}
+ if savings_plan == self.EXCLUDE:
+ filters = {self.CE_OPERATION: self.SAVINGS_PLAN_FILTER}
+ if self.start_date and self.end_date:
+ response = self.__cost_explorer.get_cost_by_tags(tag=tag, start_date=self.start_date, end_date=self.end_date, granularity=self.granularity, cost_metric=self.cost_metric, Filter=filters)
+ else:
+ response = self.__cost_explorer.get_cost_by_tags(tag=tag, granularity=self.granularity, cost_metric=self.cost_metric, Filter=filters)
+ results_by_time = response.get(self.CE_KEY_RESULTS_BY_TIME)
+ if results_by_time:
+ data_house[f'{tag}-{savings_plan}'] = []
+ for result in results_by_time:
+ data_house[f'{tag}-{savings_plan}'].extend(self.filter_data_by_tag(result, tag, savings_plan))
return data_house
@logger_time_stamp
@@ -97,12 +114,12 @@ def __upload_data(self, data: list, index: str):
@return:
"""
if self.file_name:
- with open(f'/tmp/{self.file_name}', 'a') as file:
+ with open(f'/tmp/{self.file_name}', self.APPEND) as file:
for value in data:
file.write(f'{value}\n')
else:
for value in data:
- self.upload_item_to_es(index=index, item=value, index_id=value['index_id'])
+ self.upload_item_to_es(index=index, item=value, index_id=value[self.INDEX_ID])
logger.info(f'Data uploaded to {index}, Total Data: {len(data)}')
def upload_item_to_es(self, item: dict, index: str, index_id: str = ''):
@@ -126,8 +143,8 @@ def upload_tags_cost_to_elastic_search(self):
logger.info(f'Get {self.granularity} Cost usage by metric: {self.cost_metric}')
cost_data = self.__get_daily_cost_by_tags()
for key, values in cost_data.items():
- index = f'{self._elastic_upload.es_index}-{key.lower()}'
- self.__upload_data(values, index)
+ logger.info(f"Uploading the data of {key} tag")
+ self.__upload_data(values, self._elastic_upload.es_index)
def run(self):
"""
diff --git a/cloud_governance/policy/aws/cost_explorer_payer_billings.py b/cloud_governance/policy/aws/cost_explorer_payer_billings.py
index 2c28739f..be159456 100644
--- a/cloud_governance/policy/aws/cost_explorer_payer_billings.py
+++ b/cloud_governance/policy/aws/cost_explorer_payer_billings.py
@@ -1,26 +1,43 @@
import copy
import datetime
+import logging
from ast import literal_eval
import boto3
+
+from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations
+from cloud_governance.common.clouds.aws.savingsplan.savings_plans_operations import SavingsPlansOperations
from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
-from cloud_governance.common.logger.init_logger import logger
+from cloud_governance.common.logger.init_logger import logger, handler
from cloud_governance.common.clouds.aws.cost_explorer.cost_explorer_operations import CostExplorerOperations
from cloud_governance.policy.aws.cost_billing_reports import CostBillingReports
class CostExplorerPayerBillings(CostBillingReports):
- """This class is responsible for generation cost billing report for Budget, Actual, Forecast from the Org level"""
+ """
+ This class is responsible for generating the cost billing report for Budget, Actual, Forecast from the Org level
+ Monthly savings Plan Amortization: (linked_account_total_cost/payer_account_total_cost) * monthly_savings_plan_cost
+ Monthly_support_fee: (monthly_support_fee - (monthly_support_fee * discount ) ) * (linked_account_total_cost/payer_account_total_cost)
+ """
+
+ DEFAULT_ROUND_DIGITS = 3
def __init__(self):
super().__init__()
self.__aws_role = self._environment_variables_dict.get("AWS_ACCOUNT_ROLE")
self.__access_key, self.__secret_key, self.__session = self.__get_sts_credentials()
self.__ce_client = boto3.client('ce', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session)
+ self.__savings_plan_client = boto3.client('savingsplans', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session)
+ self.__iam_client = boto3.client('iam', aws_access_key_id=self.__access_key, aws_secret_access_key=self.__secret_key, aws_session_token=self.__session)
+ self.__assumed_role_account_name = IAMOperations(iam_client=self.__iam_client).get_account_alias_cloud_name()
self.__cost_explorer_operations = CostExplorerOperations(ce_client=self.__ce_client)
- self.__cost_center_owner = literal_eval(self._environment_variables_dict.get('COST_CENTER_OWNER'))
+ self.__savings_plan_operations = SavingsPlansOperations(savings_plan_client=self.__savings_plan_client)
self.__replacement_account = literal_eval(self._environment_variables_dict.get('REPLACE_ACCOUNT_NAME'))
+ self.__savings_discounts = float(self._environment_variables_dict.get('PAYER_SUPPORT_FEE_CREDIT', 0))
+ self.__monthly_cost_for_spa_calc = {}
+ self.__monthly_cost_for_support_fee = {}
+ self.__temporary_dir = self._environment_variables_dict.get('TEMPORARY_DIR', '')  # NOTE(review): run_cloud_resource_orchestration sets 'TEMPORARY_DIRECTORY' — confirm which env key is canonical
def __get_sts_credentials(self):
"""This method returns the temporary credentials from the sts service"""
@@ -50,23 +67,27 @@ def filter_data_by_tag(self, cost_data: dict, tag: str, cost_center: int = ''):
if name and amount:
if name not in data:
if cost_center:
- acc_cost_center, account_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=name, dir_path='/tmp')
+ acc_cost_center, account_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=name, dir_path=self.__temporary_dir)
timestamp = datetime.datetime.strptime(start_time, '%Y-%m-%d')
month = datetime.datetime.strftime(timestamp, "%Y %b")
- owner = self.__cost_center_owner.get(str(acc_cost_center)) if self.__cost_center_owner.get(str(acc_cost_center)) else 'Others'
+ month_full_name = datetime.datetime.strftime(timestamp, "%B")
+ payer_monthly_savings_plan = self.update_to_gsheet.get_monthly_spa(month_name=month_full_name, dir_path=self.__temporary_dir)
budget = account_budget if start_time.split('-')[0] in years else 0
index_id = f'{start_time}-{name}'
- upload_data = {tag: name, 'Actual': round(float(amount), 3), 'start_date': start_time,
+ upload_data = {tag: name, 'Actual': round(float(amount), self.DEFAULT_ROUND_DIGITS), 'start_date': start_time,
'timestamp': timestamp, 'CloudName': 'AWS', 'Month': month,
'Forecast': 0,
'filter_date': f'{start_time}-{month.split()[-1]}',
- 'Budget': round(budget / self.MONTHS, 3), 'CostCenter': cost_center,
+ 'Budget': round(budget / self.MONTHS, self.DEFAULT_ROUND_DIGITS), 'CostCenter': cost_center,
'AllocatedBudget': budget,
- "Owner": owner
+ "Owner": owner,
+ 'SavingsPlanCost': (float(amount) / float(self.__monthly_cost_for_spa_calc.get(start_time))) * payer_monthly_savings_plan,
+ 'TotalPercentage': (float(amount) / float(self.__monthly_cost_for_spa_calc.get(start_time)))
}
+ upload_data['PremiumSupportFee'] = (float(self.__monthly_cost_for_support_fee.get(start_time)) - (float(self.__monthly_cost_for_support_fee.get(start_time)) * self.__savings_discounts)) * upload_data['TotalPercentage']  # BUG(review): removed trailing comma that made this a 1-tuple instead of a float
else:
index_id = f'{start_time}-{name}'
- upload_data = {tag: name, 'Actual': round(float(amount), 3)}
+ upload_data = {tag: name, 'Actual': round(float(amount), self.DEFAULT_ROUND_DIGITS)}
if index_id:
data[index_id] = upload_data
if cost_data.get('DimensionValueAttributes'):
@@ -82,20 +103,28 @@ def filter_data_by_tag(self, cost_data: dict, tag: str, cost_center: int = ''):
return data
def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict, account_id: str, cost_center: int, account: str):
- acc_cost_center, account_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=account_id, dir_path='/tmp')
- owner = self.__cost_center_owner.get(str(acc_cost_center)) if self.__cost_center_owner.get(str(acc_cost_center)) else 'Others'
+ acc_cost_center, account_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=account_id, dir_path=self.__temporary_dir)
for cost_forecast in cost_forecast_data:
start_date = str((cost_forecast.get('TimePeriod').get('Start')))
start_year = start_date.split('-')[0]
- cost = round(float(cost_forecast.get('MeanValue')), 3)
+ cost = round(float(cost_forecast.get('MeanValue')), self.DEFAULT_ROUND_DIGITS)
index = f'{start_date}-{account_id}'
+ month_full_name = datetime.datetime.strftime(datetime.datetime.strptime(start_date, '%Y-%m-%d'), "%B")
+ total_percentage = (cost / float(self.__monthly_cost_for_spa_calc.get(start_date)))
+ payer_monthly_savings_plan = self.update_to_gsheet.get_monthly_spa(month_name=month_full_name, dir_path=self.__temporary_dir)
if index in cost_usage_data[account]:
cost_usage_data[account][index]['Forecast'] = cost
+ cost_usage_data[account][index]['TotalPercentage'] = total_percentage
+ cost_usage_data[account][index]['SavingsPlanCost'] = total_percentage * payer_monthly_savings_plan
+ cost_usage_data[account][index]['PremiumSupportFee'] = total_percentage * (float(self.__monthly_cost_for_support_fee.get(start_date)) - (float(self.__monthly_cost_for_support_fee.get(start_date)) * self.__savings_discounts))
else:
data = {}
data['AccountId'] = account_id
data['Actual'] = 0
data['Forecast'] = cost
+ data['TotalPercentage'] = total_percentage
+ data['SavingsPlanCost'] = total_percentage * payer_monthly_savings_plan
+ data['PremiumSupportFee'] = total_percentage * (float(self.__monthly_cost_for_support_fee.get(start_date)) - (float(self.__monthly_cost_for_support_fee.get(start_date)) * self.__savings_discounts))
data['Account'] = account
data['start_date'] = str((cost_forecast.get('TimePeriod').get('Start')))
data['index_id'] = f"""{data['start_date']}-{data['Account'].lower()}"""
@@ -103,7 +132,7 @@ def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict,
data['Month'] = datetime.datetime.strftime(data['timestamp'], '%Y %b')
data['Owner'] = owner
if start_year in years:
- data['Budget'] = round(account_budget / self.MONTHS, 3)
+ data['Budget'] = round(account_budget / self.MONTHS, self.DEFAULT_ROUND_DIGITS)
data['AllocatedBudget'] = account_budget
else:
data['Budget'] = 0
@@ -113,7 +142,6 @@ def filter_forecast_data(self, cost_forecast_data: list, cost_usage_data: dict,
data['filter_date'] = f'{data["start_date"]}-{data["Month"].split()[-1]}'
cost_usage_data[account][index] = data
- @logger_time_stamp
def get_linked_accounts_forecast(self, linked_account_usage: dict):
"""
This method append the forecast to the linked accounts
@@ -125,8 +153,8 @@ def get_linked_accounts_forecast(self, linked_account_usage: dict):
try:
cost_forecast_data = self.__cost_explorer_operations.get_cost_forecast(start_date=start_date, end_date=end_date, granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'LINKED_ACCOUNT', 'Values': [account_id]}})
self.filter_forecast_data(cost_forecast_data=cost_forecast_data['ForecastResultsByTime'], cost_center=cost_center, account=account, account_id=account_id, cost_usage_data=linked_account_usage)
- except:
- logger.info(f'No Data to get forecast: {account_id}: {account}')
+ except Exception as err:
+ logger.info(f'No Data to get forecast: {account_id}: {account}, {err}')
@logger_time_stamp
def get_cost_centers(self):
@@ -151,9 +179,38 @@ def get_linked_accounts_usage(self):
account = usage['Account']
cost_usage_data.setdefault(account, {}).update({idx: usage})
self.get_linked_accounts_forecast(linked_account_usage=cost_usage_data)
+ self.__get_ce_cost_usage_by_filter_tag(tag_name='spot', cost_centers=cost_centers, cost_usage_data=cost_usage_data)
+ handler.setLevel(logging.WARN)
+ # To prevent printing the **kwargs of the function when using the logger_time_stamp decorator.
self.upload_data_elastic_search(linked_account_usage=cost_usage_data)
+ handler.setLevel(logging.INFO)
return cost_usage_data
+ def __get_ce_cost_usage_by_filter_tag(self, cost_centers: list, tag_name: str, cost_usage_data: dict):
+ """
+ This method returns the cost by filter tag_name
+ :param cost_centers:
+ :param tag_name:
+ :return:
+ """
+ start_date, end_date = self.get_date_ranges()
+ for cost_center in cost_centers:
+ cost_center_number = cost_center.get('CostCenter')
+ filter_cost_center = {'CostCategories': {'Key': 'CostCenter', 'Values': [cost_center_number]}}
+ values = self.__cost_explorer_operations.CE_COST_FILTERS[tag_name.upper()]['Values']
+ filter_tag_value = {'Dimensions': {'Key': 'PURCHASE_TYPE', 'Values': values}}
+ group_by = {'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'}
+ cost_data = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=start_date, end_date=end_date, granularity="MONTHLY",
+ GroupBy=[group_by], Filter={'And': [filter_cost_center, filter_tag_value]})
+ filtered_data = self.__cost_explorer_operations.get_ce_report_filter_data(ce_response=cost_data, tag_name=tag_name)
+ if filtered_data:
+ for index_id, row in filtered_data.items():
+ account = row.get('Account')
+ if account in self.__replacement_account:
+ account = self.__replacement_account[account]
+ if account in cost_usage_data:
+ cost_usage_data[account][index_id][f'{tag_name.title()}Usage'] = round(float(row.get(tag_name)), self.DEFAULT_ROUND_DIGITS)
+
@logger_time_stamp
def upload_data_elastic_search(self, linked_account_usage: dict):
"""This method uploads the data to elastic search"""
@@ -163,8 +220,43 @@ def upload_data_elastic_search(self, linked_account_usage: dict):
monthly_account_cost.append(cost)
self.elastic_upload.es_upload_data(items=monthly_account_cost, set_index='index_id')
+ def filter_cost_details_for_sp(self, total_cost: list):
+ """"This method filter the account total cost"""
+ results = {}
+ for row in total_cost:
+ start_time = row.get('TimePeriod').get('Start')
+ if row.get('MeanValue'):
+ cost = round(float(row.get('MeanValue')), self.DEFAULT_ROUND_DIGITS)
+ else:
+ cost = round(float(row.get('Total').get('UnblendedCost').get('Amount')), self.DEFAULT_ROUND_DIGITS)
+ results[start_time] = cost
+ return results
+
+ def get_monthly_cost_details(self, start_date: datetime = None, end_date: datetime = None):
+ """This method list the savings plan details"""
+ current_date = datetime.datetime.utcnow()
+ if not start_date and not end_date:
+ end_date = (current_date.replace(day=1) - datetime.timedelta(days=1)).date()
+ start_date = end_date.replace(day=1)
+ end_date = end_date + datetime.timedelta(days=1)
+ payer_cost_response = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(end_date), granularity='MONTHLY', cost_metric=self.COST_METRIC, Filter={'Not': {'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support', 'Refund', 'Credit']}}})
+ payer_support_fee = self.__cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(end_date), granularity='MONTHLY', cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support']}})
+ filtered_payer_cost = self.filter_cost_details_for_sp(payer_cost_response.get('ResultsByTime'))
+ filtered_support_fee = self.filter_cost_details_for_sp(payer_support_fee.get('ResultsByTime'))
+ self.__monthly_cost_for_spa_calc = filtered_payer_cost
+ self.__monthly_cost_for_support_fee.update(filtered_support_fee)
+ start_date = current_date.date()
+ end_date = start_date + datetime.timedelta(days=360)
+ forecast_response = self.__cost_explorer_operations.get_cost_forecast(start_date=str(start_date), end_date=str(end_date), granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Not': {'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support', 'Refund', 'Credit']}}})
+ payer_forecast_support_fee = self.__cost_explorer_operations.get_cost_forecast(start_date=str(start_date), end_date=str(end_date), granularity=self.GRANULARITY, cost_metric=self.COST_METRIC, Filter={'Dimensions': {'Key': 'RECORD_TYPE', 'Values': ['Support']}})
+ filtered_payer_forecast = self.filter_cost_details_for_sp(forecast_response.get('ForecastResultsByTime'))
+ filtered_payer_support_forecast = self.filter_cost_details_for_sp(payer_forecast_support_fee.get('ForecastResultsByTime'))
+ self.__monthly_cost_for_spa_calc.update(filtered_payer_forecast)
+ self.__monthly_cost_for_support_fee.update(filtered_payer_support_forecast)
+
def run(self):
"""
This method run the methods
"""
+ self.get_monthly_cost_details()
self.get_linked_accounts_usage()
diff --git a/cloud_governance/policy/aws/cost_over_usage.py b/cloud_governance/policy/aws/cost_over_usage.py
index e28b1930..616367f7 100644
--- a/cloud_governance/policy/aws/cost_over_usage.py
+++ b/cloud_governance/policy/aws/cost_over_usage.py
@@ -28,7 +28,7 @@ def __init__(self):
def get_user_used_instances(self, user_used_list: list):
"""
- This method return user used instances group by region
+ This method returns user used instances group by region
@return:
"""
region_resources = {}
diff --git a/cloud_governance/policy/aws/ebs_unattached.py b/cloud_governance/policy/aws/ebs_unattached.py
index e882cb65..2470918c 100644
--- a/cloud_governance/policy/aws/ebs_unattached.py
+++ b/cloud_governance/policy/aws/ebs_unattached.py
@@ -52,9 +52,10 @@ def __delete_ebs_unattached(self):
days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE,
extra_purse=ebs_cost, delta_cost=delta_cost)
if unattached_volumes:
- unattached_volumes_data.append([volume.get('VolumeId'),
- self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Name'),
- self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='User'),
- str(last_detached_days),
- self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Policy')])
+ unattached_volumes_data.append({'ResourceId': volume.get('VolumeId'),
+ 'Name': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Name'),
+ 'User': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='User'),
+ 'Days': str(last_detached_days),
+ 'Skip': self._get_tag_name_from_tags(tags=volume.get('Tags'), tag_name='Policy')
+ })
return unattached_volumes_data
diff --git a/cloud_governance/policy/aws/ec2_idle.py b/cloud_governance/policy/aws/ec2_idle.py
index 947086db..d78119dc 100644
--- a/cloud_governance/policy/aws/ec2_idle.py
+++ b/cloud_governance/policy/aws/ec2_idle.py
@@ -116,7 +116,7 @@ def __get_metrics_from_cloud_watch(self, instance_id: str, instance_period: int)
def __get_proposed_metrics(self, metrics: list, metric_period: int):
"""
- This method return the metrics
+ This method returns the metrics
@param metrics:
@param metric_period:
@return:
@@ -125,7 +125,7 @@ def __get_proposed_metrics(self, metrics: list, metric_period: int):
def __get_time_difference(self, launch_time: datetime):
"""
- This method return the difference of datetime
+ This method returns the difference of datetime
@param launch_time:
@return:
"""
diff --git a/cloud_governance/policy/aws/ec2_stop.py b/cloud_governance/policy/aws/ec2_stop.py
index bc558717..6f537898 100644
--- a/cloud_governance/policy/aws/ec2_stop.py
+++ b/cloud_governance/policy/aws/ec2_stop.py
@@ -71,10 +71,15 @@ def __fetch_stop_instance(self, instance_days: int, delete_instance_days: int, s
stopped_instance_tags[instance_id] = resource.get('Tags')
ec2_types[instance_id] = resource.get('InstanceType')
block_device_mappings[instance_id] = resource.get('BlockDeviceMappings')
- stopped_instances.append([resource.get('InstanceId'), self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Name'),
- self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='User'), str(resource.get('LaunchTime')),
- self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Policy')
- ])
+ stopped_instances.append({
+ 'policy': self._policy,
+ 'ResourceId': resource.get('InstanceId'),
+ 'StoppedDays': days,
+ 'StoppedDate': str(resource.get('UsageOperationUpdateTime')),
+ 'Name': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Name'),
+ 'User': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='User'),
+ 'LaunchTime': str(resource.get('LaunchTime')),
+ 'Policy': self._get_tag_name_from_tags(tags=resource.get('Tags'), tag_name='Policy')})
if self._dry_run == "no":
for instance_id, tags in stopped_instance_tags.items():
if self._get_policy_value(tags=tags) not in ('NOTDELETE', 'SKIP'):
diff --git a/cloud_governance/policy/aws/empty_roles.py b/cloud_governance/policy/aws/empty_roles.py
index 2482b81d..72d31fbb 100644
--- a/cloud_governance/policy/aws/empty_roles.py
+++ b/cloud_governance/policy/aws/empty_roles.py
@@ -12,7 +12,7 @@ def __init__(self):
def run(self):
"""
- This method return all empty roles, delete if dry_run no
+ This method returns all empty roles, delete if dry_run no
@return:
"""
return self.__delete_empty_roles()
@@ -40,7 +40,12 @@ def __delete_empty_roles(self):
empty_days = self._get_resource_last_used_days(tags=tags)
empty_role = self._check_resource_and_delete(resource_name='IAM Role', resource_id='RoleName', resource_type='CreateRole', resource=get_role, empty_days=empty_days, days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags)
if empty_role:
- zombie_roles.append([empty_role.get('RoleName'), self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_policy_value(tags=tags), empty_days])
+ zombie_roles.append({
+ 'ResourceId': empty_role.get('RoleName'),
+ 'Name': empty_role.get('RoleName'),
+ 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'),
+ 'Skip': self._get_policy_value(tags=tags),
+ 'Days': empty_days})
else:
empty_days = 0
self._update_resource_tags(resource_id=role_name, tags=tags, left_out_days=empty_days, resource_left_out=role_empty)
diff --git a/cloud_governance/policy/aws/ip_unattached.py b/cloud_governance/policy/aws/ip_unattached.py
index c26ca878..8cefd079 100644
--- a/cloud_governance/policy/aws/ip_unattached.py
+++ b/cloud_governance/policy/aws/ip_unattached.py
@@ -13,7 +13,7 @@ def __init__(self):
def run(self):
"""
- This method return zombie elastic_ip's and delete if dry_run no
+ This method returns zombie elastic_ip's and delete if dry_run no
@return:
"""
addresses = self._ec2_operations.get_elastic_ips()
@@ -40,9 +40,12 @@ def run(self):
days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags,
extra_purse=eip_cost, delta_cost=delta_cost)
if zombie_eip:
- zombie_addresses.append([address.get('AllocationId'), self._get_tag_name_from_tags(tags=tags),
- self._get_tag_name_from_tags(tags=tags, tag_name='User'), address.get('PublicIp'),
- self._get_policy_value(tags=tags), unused_days])
+ zombie_addresses.append({'ResourceId': address.get('AllocationId'),
+ 'Name': self._get_tag_name_from_tags(tags=tags),
+ 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'),
+ 'PublicIp': address.get('PublicIp'),
+ 'Skip': self._get_policy_value(tags=tags),
+ 'Days': unused_days})
else:
unused_days = 0
self._update_resource_tags(resource_id=address.get('AllocationId'), tags=tags, left_out_days=unused_days, resource_left_out=ip_no_used)
diff --git a/cloud_governance/policy/aws/monthly_report.py b/cloud_governance/policy/aws/monthly_report.py
index 89a03530..d2b3b203 100644
--- a/cloud_governance/policy/aws/monthly_report.py
+++ b/cloud_governance/policy/aws/monthly_report.py
@@ -26,7 +26,7 @@ def __init__(self):
def policy_description(self, policy_name: str):
"""
- This method return the policy description
+ This method returns the policy description
@param policy_name:
@return:
"""
@@ -35,7 +35,7 @@ def policy_description(self, policy_name: str):
'ec2_idle': 'stops the idle instances in the last 7 days. ( CPU < 5%, Network < 5k )',
'ebs_unattached': 'Delete unattached EBS volumes, where the unused days are calculated by the last DeattachedTime',
'ip_unattached': 'Delete all the elastic_ips that are unused',
- 'nat_gateway_unused': ' Delete all unused nat gateways',
+ 'unused_nat_gateway': ' Delete all unused nat gateways',
'zombie_snapshots': 'Delete all the snapshots which the AMI does not use',
's3_inactive': 'Delete the empty buckets which don’t have any content.',
'empty_roles': 'Delete the empty role which does\'t have any policies',
@@ -82,7 +82,7 @@ def send_monthly_report(self):
def row_span(self, cols: int):
"""
- This method return the table data with colspan
+ This method returns the table data with colspan
@param cols:
@return:
"""
diff --git a/cloud_governance/policy/aws/nat_gateway_unused.py b/cloud_governance/policy/aws/nat_gateway_unused.py
deleted file mode 100644
index 43c5f112..00000000
--- a/cloud_governance/policy/aws/nat_gateway_unused.py
+++ /dev/null
@@ -1,51 +0,0 @@
-
-from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy
-
-
-class NatGatewayUnused(NonClusterZombiePolicy):
- """
- This class sends an alert mail for zombie Nat gateways ( based on vpc routes )
- to the user after 4 days and delete after 7 days.
- """
-
- def __init__(self):
- super().__init__()
-
- def __check_nat_gateway_in_routes(self, nat_gateway_id: str):
- route_tables = self._ec2_client.describe_route_tables()['RouteTables']
- nat_gateway_found = False
- for route_table in route_tables:
- for route in route_table.get('Routes'):
- if route.get('NatGatewayId') == nat_gateway_id:
- nat_gateway_found = True
- return nat_gateway_found
-
- def run(self):
- """
- This method return zombie NatGateways, delete if dry_run no
- @return:
- """
- nat_gateways = self._ec2_operations.get_nat_gateways()
- nat_gateway_unused_data = []
- for nat_gateway in nat_gateways:
- if self._get_policy_value(tags=nat_gateway.get('Tags', [])) not in ('NOTDELETE', 'SKIP'):
- nat_gateway_id = nat_gateway.get('NatGatewayId')
- tags = nat_gateway.get('Tags')
- gateway_unused = False
- if not self._check_cluster_tag(tags=tags):
- if nat_gateway.get('State') == 'available':
- if not self.__check_nat_gateway_in_routes(nat_gateway_id=nat_gateway_id):
- gateway_unused = True
- unused_days = self._get_resource_last_used_days(tags=tags)
- zombie_nat_gateway = self._check_resource_and_delete(resource_name='NatGateway',
- resource_id='NatGatewayId',
- resource_type='CreateNatGateway',
- resource=nat_gateway,
- empty_days=unused_days,
- days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags)
- if zombie_nat_gateway:
- nat_gateway_unused_data.append([nat_gateway_id, self._get_tag_name_from_tags(tags=tags, tag_name='User'), zombie_nat_gateway.get('VpcId'), self._get_policy_value(tags=tags), unused_days])
- else:
- unused_days = 0
- self._update_resource_tags(resource_id=nat_gateway_id, tags=tags, left_out_days=unused_days, resource_left_out=gateway_unused)
- return nat_gateway_unused_data
diff --git a/cloud_governance/policy/aws/s3_inactive.py b/cloud_governance/policy/aws/s3_inactive.py
index 118a36a6..b0ae5eb0 100644
--- a/cloud_governance/policy/aws/s3_inactive.py
+++ b/cloud_governance/policy/aws/s3_inactive.py
@@ -15,7 +15,7 @@ def __init__(self):
def run(self):
"""
- This method return all Empty buckets and delete if dry_run no
+ This method returns all Empty buckets and delete if dry_run no
@return:
"""
return self.__delete_s3_inactive()
@@ -49,7 +49,7 @@ def __delete_s3_inactive(self):
self._cloudtrail.set_cloudtrail(region_name=region)
empty_bucket = self._check_resource_and_delete(resource_name='S3 Bucket', resource_id='Name', resource_type='CreateBucket', resource=bucket, empty_days=empty_days, days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE, tags=tags)
if empty_bucket:
- empty_buckets.append([bucket.get('Name'), self._get_tag_name_from_tags(tags=tags, tag_name='User'), str(bucket.get('CreationDate')), str(empty_days), self._get_policy_value(tags=tags)])
+ empty_buckets.append({'ResourceId': bucket.get('Name'), 'Name': bucket.get('Name'), 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'), 'Date': str(bucket.get('CreationDate')), 'Days': str(empty_days), 'Skip': self._get_policy_value(tags=tags)})
else:
empty_days = 0
self._update_resource_tags(resource_id=bucket_name, tags=tags, left_out_days=empty_days, resource_left_out=bucket_empty)
diff --git a/cloud_governance/policy/aws/skipped_resources.py b/cloud_governance/policy/aws/skipped_resources.py
index 60c22432..8bcfdd24 100644
--- a/cloud_governance/policy/aws/skipped_resources.py
+++ b/cloud_governance/policy/aws/skipped_resources.py
@@ -28,7 +28,7 @@ def get_volume_type_prices(self):
def get_resources(self, resource_name: str):
"""
- This method return resource data based on resource name
+ This method returns resource data based on resource name
@param resource_name:
@return:
"""
@@ -45,7 +45,7 @@ def get_resources(self, resource_name: str):
def get_ebs_cost(self, volume_id: str):
"""
- This method return the size of the ebs_volume
+ This method returns the size of the ebs_volume
@param volume_id:
@return:
"""
@@ -57,7 +57,7 @@ def get_ebs_cost(self, volume_id: str):
def get_instance_volume_size(self, resource: dict):
"""
- This method return size of the attached volumes of the instance
+ This method returns size of the attached volumes of the instance
@param resource:
@return:
"""
@@ -106,7 +106,7 @@ def get_not_delete_resources(self):
def run(self):
"""
- This method return all tag "Not_Delete" or "skip" resources
+ This method returns all tag "Not_Delete" or "skip" resources
@return:
"""
resources_data = self.get_not_delete_resources()
diff --git a/cloud_governance/policy/aws/spot_savings_analysis.py b/cloud_governance/policy/aws/spot_savings_analysis.py
new file mode 100644
index 00000000..5c0d7e40
--- /dev/null
+++ b/cloud_governance/policy/aws/spot_savings_analysis.py
@@ -0,0 +1,116 @@
+
+from datetime import datetime
+
+import typeguard
+
+from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations
+from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations
+from cloud_governance.common.logger.init_logger import logger
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+from cloud_governance.main.environment_variables import environment_variables
+
+
+class SpotSavingsAnalysis:
+ """
+ This class contains the spot savings analysis reports from the Athena query
+ that are gathered from the AWS Cost and Usage Reports.
+ To get reports from the athena:
+ 1. Enable the cost-and-usage reports with support of athena integration
+ 2. Create a Database and table of CUR
+ """
+
+ def __init__(self):
+ self.__environment_variables_dict = environment_variables.environment_variables_dict
+ self.__default_round_digits = self.__environment_variables_dict.get('DEFAULT_ROUND_DIGITS')
+ self.__es_index = self.__environment_variables_dict.get('es_index')
+ self.__database_name = self.__environment_variables_dict.get('ATHENA_DATABASE_NAME')
+ self.__table_name = self.__environment_variables_dict.get('ATHENA_TABLE_NAME')
+ self.__es_operations = ElasticSearchOperations()
+
+ def __get_prepared_query(self):
+ """
+ This method prepare the query
+ :return:
+ """
+ current_date = datetime.utcnow()
+ year = current_date.year
+ current_month = current_date.month
+ previous_month = current_month - 1 if current_month - 1 != 0 else 12
+ query = f"""
+ SELECT
+ date_format(line_item_usage_start_date, '%Y-%m-%d') as CurrentDate,
+ date_format(bill_billing_period_start_date, '%Y-%m-%d') as MonthStartDate,
+ line_item_usage_account_id as AccountId,
+ line_item_product_code as ProductCode,
+ product_region as Region,
+ product_instance_type as InstanceType,
+ cost_category_cost_center as CostCenter,
+ cost_category_o_us as CostCategory,
+ cost_category_organization as RHOrg,
+ ROUND(SUM(discount_total_discount), 3) as TotalDiscount,
+ ROUND(SUM(line_item_usage_amount), {self.__default_round_digits}) as UsageAmount,
+ ROUND(SUM(line_item_unblended_cost + discount_total_discount), {self.__default_round_digits}) as UnblendedCost,
+ ROUND(SUM(pricing_public_on_demand_cost), {self.__default_round_digits}) as OnDemand,
+ ROUND(SUM(pricing_public_on_demand_cost - line_item_unblended_cost), {self.__default_round_digits}) as SpotSavings
+ FROM "{self.__database_name}"."{self.__table_name}"
+ WHERE "product_product_name" = 'Amazon Elastic Compute Cloud'
+ AND "line_item_resource_id" LIKE 'i-%'
+ AND "line_item_operation" LIKE 'RunInstance%'
+ AND "product_marketoption" = 'Spot'
+ AND month(bill_billing_period_start_date) in ({previous_month}, {current_month})
+ AND year(bill_billing_period_start_date) = {year}
+ AND pricing_public_on_demand_cost <> 0
+ GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9 ORDER BY MonthStartDate desc
+ """
+ return query
+
+ def __get_query_results(self):
+ """
+ This method gets the query data
+ :return:
+ """
+ result = {}
+ query_string = self.__get_prepared_query()
+ if query_string:
+ athena_operations = PyAthenaOperations()
+ result = athena_operations.execute_query(query_string)
+ else:
+ logger.debug(f"query string is not provided, exit without query execution")
+ return result
+
+ @typeguard.typechecked
+ def __get_data_to_upload_to_es(self, athena_data_dictionary: list):
+ """
+ This method returns a dict ready to upload to elasticsearch
+ :return:
+ """
+ for row_dict in athena_data_dictionary:
+ month_start_date = row_dict.get('MonthStartDate')
+ month_name_year = datetime.strftime(datetime.strptime(month_start_date, '%Y-%m-%d'), '%Y %b')
+ row_dict['Month'] = month_name_year
+ row_dict['filter_date'] = f'{month_start_date}-{month_name_year.split()[-1]}'
+ row_dict['AccountIdCostCenter'] = f'{row_dict.get("AccountId")}-{row_dict.get("CostCenter")}'
+ row_dict['index_id'] = f'{row_dict.get("CurrentDate")}-' \
+ f'{row_dict.get("AccountId")}-' \
+ f'{row_dict.get("Region")}-{row_dict.get("InstanceType")}'
+ row_dict['AWSCostCenter'] = f'AWS-{row_dict.get("CostCenter")}'
+
+ @logger_time_stamp
+ def __collect_reports_and_upload_es(self):
+ """
+ This method collects the data and uploads to elastic search
+ :return:
+ """
+ query_result = self.__get_query_results()
+ if query_result:
+ self.__get_data_to_upload_to_es(athena_data_dictionary=query_result)
+ if query_result:
+ self.__es_operations.upload_data_in_bulk(data_items=query_result, id='index_id', index=self.__es_index)
+
+ @logger_time_stamp
+ def run(self):
+ """
+ This is the starting point of the methods
+ :return:
+ """
+ self.__collect_reports_and_upload_es()
diff --git a/cloud_governance/policy/aws/unused_nat_gateway.py b/cloud_governance/policy/aws/unused_nat_gateway.py
new file mode 100644
index 00000000..cdbb214b
--- /dev/null
+++ b/cloud_governance/policy/aws/unused_nat_gateway.py
@@ -0,0 +1,92 @@
+import datetime
+
+
+from cloud_governance.common.clouds.aws.cloudwatch.cloudwatch_operations import CloudWatchOperations
+from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy
+
+
+class UnusedNatGateway(NonClusterZombiePolicy):
+ """
+ This class sends an alert mail for zombie Nat gateways ( based on vpc routes )
+ to the user after 4 days and delete after 7 days.
+ """
+
+ NAMESPACE = 'AWS/NATGateway'
+ UNUSED_DAYS = 1
+
+ def __init__(self):
+ super().__init__()
+ self._cloudwatch = CloudWatchOperations(region=self._region)
+
+ def __check_cloud_watch_logs(self, resource_id: str, days: int = UNUSED_DAYS):
+ """
+ This method returns whether the NatGateway was used in the last input days
+ :param resource_id:
+ :param days:
+ :return:
+ """
+ if days == 0:
+ days = 1
+ end_time = datetime.datetime.utcnow()
+ start_time = end_time - datetime.timedelta(days=days)
+ response = self._cloudwatch.get_metric_data(start_time=start_time, end_time=end_time, resource_id=resource_id,
+ resource_type='NatGatewayId', namespace=self.NAMESPACE,
+ metric_names={'ActiveConnectionCount': 'Count'},
+ statistic='Average')['MetricDataResults'][0]
+ for value in response.get('Values'):
+ if value > 0:
+ return False
+ return True
+
+ def __check_nat_gateway_in_routes(self, nat_gateway_id: str):
+ """
+ This method checks whether the nat gateway is present in the routes or not.
+ :param nat_gateway_id:
+ :return:
+ """
+ route_tables = self._ec2_client.describe_route_tables()['RouteTables']
+ nat_gateway_found = False
+ for route_table in route_tables:
+ for route in route_table.get('Routes'):
+ if route.get('NatGatewayId') == nat_gateway_id:
+ nat_gateway_found = True
+ return nat_gateway_found
+
+ def run(self):
+ """
+ This method returns zombie NatGateways, delete if dry_run no
+ @return:
+ """
+ nat_gateways = self._ec2_operations.get_nat_gateways()
+ nat_gateway_unused_data = []
+ for nat_gateway in nat_gateways:
+ if self._get_policy_value(tags=nat_gateway.get('Tags', [])) not in ('NOTDELETE', 'SKIP'):
+ nat_gateway_id = nat_gateway.get('NatGatewayId')
+ tags = nat_gateway.get('Tags')
+ gateway_unused = False
+ last_used_days = int(self._ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='LastUsedDay', default_value=1))
+ if not self._check_cluster_tag(tags=tags):
+ if nat_gateway.get('State') == 'available':
+ if not self.__check_nat_gateway_in_routes(nat_gateway_id=nat_gateway_id) or self.__check_cloud_watch_logs(days=last_used_days, resource_id=nat_gateway_id):
+ gateway_unused = True
+ unused_days = self._get_resource_last_used_days(tags=tags)
+ zombie_nat_gateway = self._check_resource_and_delete(resource_name='NatGateway',
+ resource_id='NatGatewayId',
+ resource_type='CreateNatGateway',
+ resource=nat_gateway,
+ empty_days=unused_days,
+ days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE,
+ tags=tags)
+ if zombie_nat_gateway:
+ nat_gateway_unused_data.append(
+ {'ResourceId': nat_gateway_id,
+ 'Name': self._get_tag_name_from_tags(tags=tags, tag_name='Name'),
+ 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'),
+ 'VpcId': zombie_nat_gateway.get('VpcId'),
+ 'Skip': self._get_policy_value(tags=tags),
+ 'Days': unused_days, 'Policy': self._policy})
+ else:
+ unused_days = 0
+ self._update_resource_tags(resource_id=nat_gateway_id, tags=tags, left_out_days=unused_days,
+ resource_left_out=gateway_unused)
+ return nat_gateway_unused_data
diff --git a/cloud_governance/policy/aws/zombie_cluster_resource.py b/cloud_governance/policy/aws/zombie_cluster_resource.py
index 00743617..203e9fac 100644
--- a/cloud_governance/policy/aws/zombie_cluster_resource.py
+++ b/cloud_governance/policy/aws/zombie_cluster_resource.py
@@ -38,7 +38,7 @@ def __init__(self, cluster_prefix: str = None, delete: bool = False, region: str
def all_cluster_instance(self):
"""
- This method return list of cluster's instance tag name that contains openshift tag prefix from all regions
+ This method returns list of cluster's instance tag name that contains openshift tag prefix from all regions
:return: list of cluster's instance tag name
"""
instances_list = []
@@ -63,7 +63,7 @@ def all_cluster_instance(self):
def _cluster_instance(self):
"""
- This method return list of cluster's instance tag name that contains openshift tag prefix
+ This method returns list of cluster's instance tag name that contains openshift tag prefix
:return: list of cluster's instance tag name
"""
instances_list = []
@@ -85,7 +85,7 @@ def _cluster_instance(self):
def __get_cluster_resources(self, resources_list: list, input_resource_id: str, tags: str = 'Tags'):
"""
- This method return all cluster resources keys that start with cluster prefix
+ This method returns all cluster resources keys that start with cluster prefix
:param resources_list:
:param tags:
:return: dictionary of the resources key and id
@@ -181,7 +181,7 @@ def __get_all_zombie_resources(self, exist_resources: dict):
def zombie_cluster_volume(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's volume according to cluster tag name and cluster name data
+ This method returns list of cluster's volume according to cluster tag name and cluster name data
delete only available resource that related to cluster
"""
available_volumes = []
@@ -200,14 +200,14 @@ def zombie_cluster_volume(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource_id=zombie, resource='ec2_volume')
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource_id=zombie, resource='ec2_volume')
return zombies, cluster_left_out_days
def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's ami according to cluster tag name and cluster name data
+ This method returns list of cluster's ami according to cluster tag name and cluster name data
"""
images_data = self.ec2_operations.get_images()
exist_ami = self.__get_cluster_resources(resources_list=images_data, input_resource_id='ImageId')
@@ -225,7 +225,7 @@ def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
self.ec2_client.deregister_image(ImageId=zombie)
logger.info(f'deregister_image: {zombie}')
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.ec2_client.deregister_image(ImageId=zombie)
logger.info(f'deregister_image: {zombie}')
except Exception as err:
@@ -234,7 +234,7 @@ def zombie_cluster_ami(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
def zombie_cluster_snapshot(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's snapshot according to cluster tag name and cluster name data
+ This method returns list of cluster's snapshot according to cluster tag name and cluster name data
"""
snapshots_data = self.ec2_operations.get_snapshots()
exist_snapshot = self.__get_cluster_resources(resources_list=snapshots_data, input_resource_id='SnapshotId')
@@ -247,7 +247,7 @@ def zombie_cluster_snapshot(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='ebs_snapshots', resource_id=zombie)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='ebs_snapshots', resource_id=zombie)
return zombies, cluster_left_out_days
@@ -318,7 +318,7 @@ def __get_zombies_by_vpc_id(self, vpc_id: str, resources: list, output_tag: str,
def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of zombie cluster's security groups compare to existing instances and cluster name data
+ This method returns list of zombie cluster's security groups compare to existing instances and cluster name data
:return: list of zombie cluster's security groups
"""
security_groups = self.ec2_operations.get_security_groups()
@@ -338,7 +338,7 @@ def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str =
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource('security_group', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource('security_group', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
@@ -346,7 +346,7 @@ def zombie_cluster_security_group(self, vpc_id: str = '', cluster_tag_vpc: str =
def zombie_cluster_elastic_ip(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of zombie cluster's elastic ip according to existing instances and cluster name data
+ This method returns list of zombie cluster's elastic ip according to existing instances and cluster name data
"""
exist_elastic_ip_association = []
exist_elastic_ip_allocation = []
@@ -377,14 +377,14 @@ def zombie_cluster_elastic_ip(self, vpc_id: str = '', cluster_tag_vpc: str = '')
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='elastic_ip', resource_id=zombie, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='elastic_ip', resource_id=zombie, cluster_tag=cluster_tag)
zombies = {**zombies_all}
return zombies, cluster_left_out_days
def zombie_cluster_network_interface(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of zombie cluster's network interface according to existing instances and cluster name data
+ This method returns list of zombie cluster's network interface according to existing instances and cluster name data
"""
network_interfaces_data = self.ec2_operations.get_network_interface()
exist_network_interface = self.__get_cluster_resources(resources_list=network_interfaces_data, input_resource_id='NetworkInterfaceId', tags='TagSet')
@@ -406,14 +406,14 @@ def zombie_cluster_network_interface(self, vpc_id: str = '', cluster_tag_vpc: st
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='network_interface', resource_id=zombie_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='network_interface', resource_id=zombie_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_load_balancer(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's load balancer according to cluster vpc and cluster name data
+ This method returns list of cluster's load balancer according to cluster vpc and cluster name data
"""
exist_load_balancer = {}
@@ -441,13 +441,13 @@ def zombie_cluster_load_balancer(self, vpc_id: str = '', cluster_tag_vpc: str =
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer', resource_id=zombie, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer', resource_id=zombie, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_load_balancer_v2(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's load balancer according to cluster vpc and cluster name data
+ This method returns list of cluster's load balancer according to cluster vpc and cluster name data
"""
exist_load_balancer = {}
load_balancers_data = self.ec2_operations.get_load_balancers_v2()
@@ -474,13 +474,13 @@ def zombie_cluster_load_balancer_v2(self, vpc_id: str = '', cluster_tag_vpc: str
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer_v2', resource_id=zombie, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='load_balancer_v2', resource_id=zombie, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def __get_all_exist_vpcs(self):
"""
- This method return all exist vpc ids (for supporting Network ACL - missing OpenShift tags)
+ This method returns all exist vpc ids (for supporting Network ACL - missing OpenShift tags)
:return:
"""
vpcs_data = self.ec2_operations.get_vpcs()
@@ -491,7 +491,7 @@ def __get_all_exist_vpcs(self):
def zombie_cluster_vpc(self):
"""
- This method return list of cluster's vpc according to cluster tag name and cluster name data
+ This method returns list of cluster's vpc according to cluster tag name and cluster name data
"""
vpcs_data = self.ec2_operations.get_vpcs()
exist_vpc = self.__get_cluster_resources(resources_list=vpcs_data, input_resource_id='VpcId')
@@ -510,14 +510,14 @@ def zombie_cluster_vpc(self):
if delete_cluster_resource and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='vpc', resource_id=zombie, pending_resources=delete_dict, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_ec2_resource.delete_zombie_resource(resource='vpc', resource_id=zombie, pending_resources=delete_dict, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_subnet(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's subnet according to cluster tag name and cluster name data
+ This method returns list of cluster's subnet according to cluster tag name and cluster name data
"""
subnets_data = self.ec2_operations.get_subnets()
exist_subnet = self.__get_cluster_resources(resources_list=subnets_data, input_resource_id='SubnetId')
@@ -537,14 +537,14 @@ def zombie_cluster_subnet(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='subnet', resource_id=zombie_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='subnet', resource_id=zombie_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_route_table(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's route table according to cluster tag name and cluster name data
+ This method returns list of cluster's route table according to cluster tag name and cluster name data
"""
route_tables_data = self.ec2_operations.get_route_tables()
exist_route_table = self.__get_cluster_resources(resources_list=route_tables_data, input_resource_id='RouteTableId')
@@ -564,14 +564,14 @@ def zombie_cluster_route_table(self, vpc_id: str = '', cluster_tag_vpc: str = ''
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='route_table', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='route_table', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_internet_gateway(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's route table internet gateway according to cluster tag name and cluster name data
+ This method returns list of cluster's route table internet gateway according to cluster tag name and cluster name data
"""
internet_gateways_data = self.ec2_operations.get_internet_gateways()
exist_internet_gateway = self.__get_cluster_resources(resources_list=internet_gateways_data, input_resource_id='InternetGatewayId')
@@ -593,7 +593,7 @@ def zombie_cluster_internet_gateway(self, vpc_id: str = '', cluster_tag_vpc: str
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='internet_gateway', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='internet_gateway', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
@@ -601,7 +601,7 @@ def zombie_cluster_internet_gateway(self, vpc_id: str = '', cluster_tag_vpc: str
def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's dhcp option according to cluster tag name and cluster name data
+ This method returns list of cluster's dhcp option according to cluster tag name and cluster name data
"""
dhcp_options_data = self.ec2_operations.get_dhcp_options()
exist_dhcp_option = self.__get_cluster_resources(resources_list=dhcp_options_data, input_resource_id='DhcpOptionsId')
@@ -619,7 +619,7 @@ def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = ''
else:
self.delete_ec2_resource.delete_zombie_resource(resource='dhcp_options', resource_id=zombie)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
if vpc_id:
self.delete_ec2_resource.delete_zombie_resource(resource='dhcp_options', resource_id=zombie, vpc_id=vpc_id)
else:
@@ -628,7 +628,7 @@ def zombie_cluster_dhcp_option(self, vpc_id: str = '', cluster_tag_vpc: str = ''
def zombie_cluster_vpc_endpoint(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of cluster's vpc endpoint according to cluster tag name and cluster name data
+ This method returns list of cluster's vpc endpoint according to cluster tag name and cluster name data
"""
vpc_endpoints_data = self.ec2_operations.get_vpce()
exist_vpc_endpoint = self.__get_cluster_resources(resources_list=vpc_endpoints_data, input_resource_id='VpcEndpointId')
@@ -650,14 +650,14 @@ def zombie_cluster_vpc_endpoint(self, vpc_id: str = '', cluster_tag_vpc: str = '
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='vpc_endpoints', resource_id=zombie_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='vpc_endpoints', resource_id=zombie_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_nat_gateway(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of zombie cluster's nat gateway according to cluster tag name and cluster name data
+ This method returns list of zombie cluster's nat gateway according to cluster tag name and cluster name data
"""
nat_gateways_data = self.ec2_operations.get_nat_gateways()
exist_nat_gateway = self.__get_cluster_resources(resources_list=nat_gateways_data, input_resource_id='NatGatewayId')
@@ -676,14 +676,14 @@ def zombie_cluster_nat_gateway(self, vpc_id: str = '', cluster_tag_vpc: str = ''
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='nat_gateways', resource_id=zombie_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='nat_gateways', resource_id=zombie_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_network_acl(self, vpc_id: str = '', cluster_tag_vpc: str = ''):
"""
- This method return list of zombie cluster's network acl according to existing vpc id and cluster name data
+ This method returns list of zombie cluster's network acl according to existing vpc id and cluster name data
"""
exist_network_acl = {}
network_acls_data = self.ec2_operations.get_nacls()
@@ -713,14 +713,14 @@ def zombie_cluster_network_acl(self, vpc_id: str = '', cluster_tag_vpc: str = ''
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='network_acl', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
for zombie_id in zombie_ids:
self.delete_ec2_resource.delete_zombie_resource(resource='network_acl', resource_id=zombie_id, vpc_id=vpc_id, cluster_tag=cluster_tag)
return zombies, cluster_left_out_days
def zombie_cluster_role(self):
"""
- This method return list of cluster's role in all regions according to cluster name and cluster name data
+ This method returns list of cluster's role in all regions according to cluster name and cluster name data
* Role is a global resource, need to scan for live cluster in all regions
"""
exist_role_name_tag = {}
@@ -751,13 +751,13 @@ def zombie_cluster_role(self):
if delete_cluster_resource and self.delete:
self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_role')
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_role')
return zombies, cluster_left_out_days
def zombie_cluster_user(self):
"""
- This method return list of cluster's user according to cluster name and cluster name data
+ This method returns list of cluster's user according to cluster name and cluster name data
* User is a global resource, need to scan for live cluster in all regions
"""
exist_user_name_tag = {}
@@ -785,14 +785,14 @@ def zombie_cluster_user(self):
if delete_cluster_resource and self.delete:
self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_user')
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_iam_resource.delete_iam_zombie_resource(resource_id=zombie, resource_type='iam_user')
return zombies, cluster_left_out_days
def zombie_cluster_s3_bucket(self, cluster_stamp: str = 'image-registry'):
"""
- This method return list of cluster's s3 bucket according to cluster name and cluster name data
+ This method returns list of cluster's s3 bucket according to cluster name and cluster name data
* S3 is a global resource, need to scan for live cluster in all regions
"""
exist_bucket_name_tag = {}
@@ -824,7 +824,7 @@ def zombie_cluster_s3_bucket(self, cluster_stamp: str = 'image-registry'):
if delete_cluster_resource and self.delete:
self.delete_s3_resource.delete_zombie_s3_resource(resource_type='s3_bucket', resource_id=zombie)
else:
- if self._force_delete:
+ if self._force_delete and self.delete:
self.delete_s3_resource.delete_zombie_s3_resource(resource_type='s3_bucket', resource_id=zombie)
return zombies, cluster_left_out_days
diff --git a/cloud_governance/policy/aws/zombie_snapshots.py b/cloud_governance/policy/aws/zombie_snapshots.py
index 7a8ca2d7..58b15873 100644
--- a/cloud_governance/policy/aws/zombie_snapshots.py
+++ b/cloud_governance/policy/aws/zombie_snapshots.py
@@ -25,7 +25,7 @@ def _get_image_ids_from_description(self, snapshot_description: str):
def run(self):
"""
- This method return all the zombie snapshots, delete if dry_run no
+ This method returns all the zombie snapshots, delete if dry_run no
@return:
"""
snapshots = self._ec2_operations.get_snapshots()
@@ -51,12 +51,13 @@ def run(self):
days_to_delete_resource=self.DAYS_TO_DELETE_RESOURCE,
tags=tags)
if zombie_snapshot:
- zombie_snapshots.append([snapshot.get('SnapshotId'),
- self._get_tag_name_from_tags(tags=tags),
- self._get_tag_name_from_tags(tags=tags, tag_name='User'),
- f'{str(snapshot.get("VolumeSize"))}Gb',
- self._get_policy_value(tags=snapshot.get('Tags')), str(unused_days)
- ])
+ zombie_snapshots.append({'ResourceId': snapshot.get('SnapshotId'),
+ 'Name': self._get_tag_name_from_tags(tags=tags),
+ 'User': self._get_tag_name_from_tags(tags=tags, tag_name='User'),
+ 'Size': f'{str(snapshot.get("VolumeSize"))}Gb',
+ 'Skip': self._get_policy_value(tags=snapshot.get('Tags')),
+ 'Days': str(unused_days)
+ })
else:
unused_days = 0
self._update_resource_tags(resource_id=snapshot_id, tags=tags, left_out_days=unused_days,
diff --git a/cloud_governance/policy/azure/cost_billing_reports.py b/cloud_governance/policy/azure/cost_billing_reports.py
index 2a80a74a..ad7c11fa 100644
--- a/cloud_governance/policy/azure/cost_billing_reports.py
+++ b/cloud_governance/policy/azure/cost_billing_reports.py
@@ -17,9 +17,6 @@ class CostBillingReports:
This class is responsible for generation cost billing report for Budget, Actual, Forecast
"""
- COST_CENTER_OWNER = 'Shai'
- COST_CENTER_OWNER_OTHERS = 'Others'
-
def __init__(self):
self.__environment_variables_dict = environment_variables.environment_variables_dict
self.__total_account = self.__environment_variables_dict.get('TOTAL_ACCOUNTS', '')
@@ -29,7 +26,7 @@ def __init__(self):
self.gdrive_operations = GoogleDriveOperations()
self.__gsheet_id = self.__environment_variables_dict.get('SPREADSHEET_ID')
self.update_to_gsheet = UploadToGsheet()
- self.__cost_center, self.__allocated_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.azure_operations.subscription_id, dir_path='/tmp')
+ self.__cost_center, self.__allocated_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.azure_operations.subscription_id, dir_path='/tmp')
self.__common_data = self.get_common_data()
def get_common_data(self):
@@ -44,7 +41,7 @@ def get_common_data(self):
upload_data['AllocatedBudget'] = 0
upload_data['CostCenter'] = int(self.__cost_center)
upload_data['CloudName'] = self.azure_operations.cloud_name
- upload_data['Owner'] = self.COST_CENTER_OWNER_OTHERS
+ upload_data['Owner'] = self.__owner
return upload_data
@logger_time_stamp
@@ -66,12 +63,12 @@ def get_data_from_costs(self, cost_data_rows: list, cost_data_columns: list, cos
if cost_center > 0:
common_data['CostCenter'] = cost_center
if subscription_id:
- cost_center, allocated_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=subscription_id, dir_path='/tmp')
+ cost_center, allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=subscription_id, dir_path='/tmp')
if cost_center:
common_data['CostCenter'] = int(cost_center)
- common_data['Owner'] = self.COST_CENTER_OWNER
+ common_data['Owner'] = owner
else:
- common_data['Owner'] = self.COST_CENTER_OWNER_OTHERS
+ common_data['Owner'] = owner
else:
allocated_budget, years = self.__allocated_budget, self.__years
for index, item in enumerate(cost_data_rows):
@@ -87,12 +84,12 @@ def get_data_from_costs(self, cost_data_rows: list, cost_data_columns: list, cos
common_data['Account'] = item[key]
elif column.get('name') == 'SubscriptionId':
common_data['AccountId'] = item[key]
- cost_center, allocated_budget, years = self.update_to_gsheet.get_cost_center_budget_details(account_id=item[key], dir_path='/tmp')
+ cost_center, allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=item[key], dir_path='/tmp')
if cost_center:
common_data['CostCenter'] = int(cost_center)
- common_data['Owner'] = self.COST_CENTER_OWNER
+ common_data['Owner'] = owner
else:
- common_data['Owner'] = self.COST_CENTER_OWNER_OTHERS
+ common_data['Owner'] = owner
else:
if column.get('type') == 'Datetime':
start_date = item[key].split('T')[0]
diff --git a/cloud_governance/policy/common_policies/__init__.py b/cloud_governance/policy/common_policies/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloud_governance/policy/common_policies/send_aggregated_alerts.py b/cloud_governance/policy/common_policies/send_aggregated_alerts.py
new file mode 100644
index 00000000..741a7444
--- /dev/null
+++ b/cloud_governance/policy/common_policies/send_aggregated_alerts.py
@@ -0,0 +1,159 @@
+import json
+import logging
+import os
+import tempfile
+from datetime import date, datetime, timedelta
+
+import typeguard
+from botocore.exceptions import ClientError
+
+from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations
+from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations
+from cloud_governance.common.jira.jira import logger
+from cloud_governance.common.logger.init_logger import handler
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+from cloud_governance.common.mails.mail_message import MailMessage
+from cloud_governance.common.mails.postfix import Postfix
+from cloud_governance.main.environment_variables import environment_variables
+
+
+class SendAggregatedAlerts:
+ """
+ This class sends alerts to users whose resources do not satisfy the policy conditions
+ """
+
+ FILE_NAME = 'resources.json'
+ GLOBAL_REGION = 'us-east-1'
+ TODAY_DATE = str(date.today()).replace('-', '/')
+
+ def __init__(self):
+ self.__environment_variables = environment_variables.environment_variables_dict
+ self.__bucket_name = self.__environment_variables.get('BUCKET_NAME')
+ self.__bucket_key = self.__environment_variables.get('BUCKET_KEY')
+ self.__policies = self.__environment_variables.get('POLICIES_TO_ALERT')
+ self.__s3_operations = S3Operations(region_name='us-east-2', bucket=self.__bucket_name, logs_bucket_key=self.__bucket_key)
+ self.__active_regions = EC2Operations().get_active_regions()
+ self.__kerberos_users = self.__get_kerberos_users_for_iam_users()
+ self.__global_region_policies = ['s3-inactive', 'empty-roles']
+ self.__mail_alert_days = self.__environment_variables.get('MAIL_ALERT_DAYS')
+ self.__policy_action_days = self.__environment_variables.get('POLICY_ACTIONS_DAYS')
+ self.__mail_message = MailMessage()
+ self.__postfix = Postfix()
+
+ @logger_time_stamp
+ def __get_kerberos_users_for_iam_users(self):
+ """
+ This method returns a mapping of lower-cased IAM usernames to their Kerberos usernames
+ :return:
+ """
+ responses = {}
+ users = self.__environment_variables.get('KERBEROS_USERS')
+ for iam_user, kerberos_user in users.items():
+ responses[iam_user.lower()] = kerberos_user.lower()
+ return responses
+
+ def __get_users_agg_result(self, policy_result: list, agg_users_result: dict, policy_name: str, region: str):
+ """
+ This method returns the aggregated users resources list
+ :param agg_users_result:
+ :param policy_result:
+ :return:
+ """
+ if policy_result:
+ for response in policy_result:
+ if type(response) == dict:
+ skip_policy = response.get('Skip')
+ if skip_policy in ('NA', '', None):
+ user = response.pop('User').lower()
+ response['Region'] = region
+ response['Policy'] = policy_name
+ if user in self.__kerberos_users.keys():
+ user = self.__kerberos_users.get(user)
+ agg_users_result.setdefault(user, []).append(response)
+
+ def __get_policy_data_in_bucket(self, region: str, policy: str):
+ """
+ This method returns the policy data in s3 bucket
+ :param region:
+ :param policy:
+ :return:
+ """
+ try:
+ policy_save_path = f'{self.__bucket_key}/{region}/{policy}'
+ bucket_path_file = self.__s3_operations.get_last_objects(bucket=self.__bucket_name, key_prefix=f'{policy_save_path}/{self.TODAY_DATE}')
+ policy_s3_response = self.__s3_operations.get_last_s3_policy_content(s3_file_path=bucket_path_file, file_name=self.FILE_NAME)
+ return json.loads(policy_s3_response) if policy_s3_response else []
+ except ClientError as err:
+ logger.info(err)
+ return []
+
+ @logger_time_stamp
+ def __get_policy_users_list(self):
+ """
+ This method gets the latest policy responses
+ :return:
+ """
+ agg_users_result = {}
+ for policy in self.__policies:
+ run_global_region = True if policy in self.__global_region_policies else False
+ for region in self.__active_regions:
+ if (region == self.GLOBAL_REGION and run_global_region) or not run_global_region:
+ self.__get_users_agg_result(policy_result=self.__get_policy_data_in_bucket(region=region, policy=policy),
+ agg_users_result=agg_users_result, policy_name=policy, region=region)
+ if region == self.GLOBAL_REGION and run_global_region:
+ break
+ return agg_users_result
+
+ def __get_policy_agg_data_by_region(self, policy_data: dict):
+ """
+ This method returns the policy data agg by region
+ :param policy_data:
+ :return:
+ """
+ agg_policy_region_result = {}
+ for policy_name, policy_region_data in policy_data.items():
+ agg_policy_region_result[policy_name] = {}
+ for region_data in policy_region_data:
+ region_name = region_data.get('Region').lower()
+ agg_policy_region_result[policy_name].setdefault(region_name, []).append(region_data)
+ return agg_policy_region_result
+
+ @logger_time_stamp
+ def __get_policy_agg_data(self, user_policy_data: list):
+ """
+ This method returns the data agg by policy
+ :param user_policy_data:
+ :return:
+ """
+ agg_policy_result = {}
+ for result in user_policy_data:
+ policy_name = result.get('Policy').lower()
+ days = int(result.get('Days', 0))
+ if days in self.__mail_alert_days or days in self.__policy_action_days:
+ result['Action'] = 'Deleted' if days in self.__policy_action_days else 'Monitoring'
+ result['DeletedDay'] = (datetime.now() + timedelta(days=self.__policy_action_days[0] - days)).date()
+ agg_policy_result.setdefault(policy_name, []).append(result)
+ return self.__get_policy_agg_data_by_region(policy_data=agg_policy_result)
+
+ @logger_time_stamp
+ def __send_mail_alerts_to_users(self):
+ """
+ This method sends mail alerts to users
+ :return:
+ """
+ policy_agg_users_list = self.__get_policy_users_list()
+ for user, user_policy_data in policy_agg_users_list.items():
+ handler.setLevel(logging.WARN)
+ agg_policy_data = self.__get_policy_agg_data(user_policy_data=user_policy_data)
+ if agg_policy_data:
+ handler.setLevel(logging.INFO)
+ subject, body = self.__mail_message.get_agg_policies_mail_message(user=user, user_resources=agg_policy_data)
+ self.__postfix.send_email_postfix(subject=subject, content=body, to=user, cc=[], mime_type='html')
+
+ @logger_time_stamp
+ def run(self):
+ """
+ This method starts the aggregated alerting workflow
+ :return:
+ """
+ self.__send_mail_alerts_to_users()
diff --git a/cloud_governance/policy/gcp/__init__.py b/cloud_governance/policy/gcp/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloud_governance/policy/gcp/cost_billing_reports.py b/cloud_governance/policy/gcp/cost_billing_reports.py
new file mode 100644
index 00000000..668a54fd
--- /dev/null
+++ b/cloud_governance/policy/gcp/cost_billing_reports.py
@@ -0,0 +1,293 @@
+import json
+import os
+from datetime import datetime, timedelta
+from ast import literal_eval
+
+from typeguard import typechecked
+
+from cloud_governance.common.clouds.gcp.google_account import GoogleAccount
+from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload
+from cloud_governance.common.google_drive.upload_to_gsheet import UploadToGsheet
+from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp
+from cloud_governance.main.environment_variables import environment_variables
+from cloud_governance.common.logger.init_logger import logger
+
+
+class CostBillingReports:
+ """
+ This class is responsible for generating the cost billing report for Budget, Actual, Forecast
+ """
+
+ DEFAULT_YEARS = 12
+ DEFAULT_ROUND_DIGITS = 3
+
+ def __init__(self):
+ self.__environment_variables_dict = environment_variables.environment_variables_dict
+ self.__database_name = self.__environment_variables_dict.get('GCP_DATABASE_NAME', '')
+ self.__table_name = self.__environment_variables_dict.get('GCP_DATABASE_TABLE_NAME', '')
+ self.__gcp_account = GoogleAccount()
+ self.__gsheet_id = self.__environment_variables_dict.get('SPREADSHEET_ID')
+ self.__cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME').upper()
+ self.update_to_gsheet = UploadToGsheet()
+ self.elastic_upload = ElasticUpload()
+
+ @logger_time_stamp
+ def __next_twelve_months(self):
+ """
+ This method returns the next 12 (month, year) pairs
+ :return:
+ """
+ months = 12
+ year = datetime.now().year
+ next_month = datetime.now().month + 1
+ month_year = []
+ for idx in range(months):
+ month = str((idx+next_month) % months)
+ c_year = year
+ if len(month) == 1:
+ month = f'0{month}'
+ if month == '00':
+ month = 12
+ year = year+1
+ month_year.append((str(month), c_year))
+ return month_year
+
+ @typechecked()
+ @logger_time_stamp
+ def __prepare_usage_query(self, first_year_month: str = None, second_year_month: str = None):
+ """
+ This method prepares the BigQuery queries for usage costs
+ :param first_year_month: #YYYYMM
+ :param second_year_month: #YYYYMM
+ :return:
+ """
+ if not first_year_month and not second_year_month:
+ current_month = datetime.now().replace(day=1)
+ past_month = current_month - timedelta(days=1)
+ first_year_month = past_month.strftime("%Y%m")
+ second_year_month = current_month.strftime("%Y%m")
+ logger.info(f'StartMonth: {first_year_month}, EndMonth: {second_year_month}')
+ fetch_monthly_invoice_query = f"""
+ SELECT ifnull(project.ancestors[SAFE_OFFSET(1)].display_name, 'NA') as folder_name,
+ ifnull(project.ancestry_numbers, 'NA') as folder_id, invoice.month, ifnull(project.id, 'GCP-refund/credit') as project_name, ifnull(project.number, '000000000000') as project_id,
+ (SUM(CAST(cost AS NUMERIC)) + SUM(IFNULL((SELECT SUM(CAST(c.amount AS NUMERIC))
+ FROM UNNEST(credits) AS c), 0))) AS total_cost
+ FROM `{self.__database_name}.{self.__table_name}`
+ where invoice.month BETWEEN '{first_year_month}' AND '{second_year_month}'
+ GROUP BY 1, 2, 3, 4, 5
+ ORDER BY 3
+ """
+ fetch_monthly_folders_query = f"""
+ SELECT TO_JSON_STRING(project.ancestors) as project_folders, project.number, invoice.month, ifnull(project.ancestry_numbers, 'NA') as folder_id
+ FROM `{self.__database_name}.{self.__table_name}`
+ where invoice.month BETWEEN '{first_year_month}' AND '{second_year_month}' GROUP BY 1, 2, 3, 4 ORDER BY invoice.month
+ """
+ return [fetch_monthly_invoice_query, fetch_monthly_folders_query]
+
+ @typechecked()
+ @logger_time_stamp
+ def __organized_results(self, data_rows: list):
+ """
+ This method organizes the results to be uploaded to ElasticSearch
+ :param data_rows:
+ :return:
+ """
+ compress_gcp_data = {} # compress data based on budget_id
+ for row in data_rows:
+ month = row.get('Month')
+ cost_center, allocated_budget, years, owner = 0, 0, '', 'Others'
+ project_budget_account_id = 0
+ for idx, _id in enumerate((row.get('folder_ids')+[row.get('ProjectId')])[::-1]): # start from reverse [root, sub_child, child]
+ cost_center, allocated_budget, years, owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=_id, dir_path='/tmp')
+ if cost_center > 0:
+ project_budget_account_id = _id
+ break
+ parent_index = len(row.get("folder_ids"))
+ index = f'{project_budget_account_id}-{month}'
+ if index in compress_gcp_data:
+ compress_gcp_data[index]['Actual'] = round(compress_gcp_data[index]['Actual'] + row.get('Actual'), 3)
+ compress_gcp_data[index]['Projects'].append({
+ 'Project': row.get('Project'),
+ 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS),
+ 'ProjectId': row.get('ProjectId')
+ })
+ if parent_index > compress_gcp_data[index]['total_folders']:
+ compress_gcp_data[index]['Account'] = row.get(f'parent{parent_index}', 'NA')
+ compress_gcp_data[index]['AccountId'] = row.get(f'parent{parent_index}_id', 'NA')
+ else:
+ project_cost_data = {'CloudName': self.__cloud_name, 'CostCenter': cost_center, 'Owner': owner,
+ 'Budget': round(allocated_budget / self.DEFAULT_YEARS, self.DEFAULT_ROUND_DIGITS),
+ 'Forecast': 0,
+ 'AllocatedBudget': round(allocated_budget, self.DEFAULT_ROUND_DIGITS),
+ 'BudgetId': project_budget_account_id,
+ 'Account': row.get(f'parent{parent_index}', 'NA'),
+ 'AccountId': row.get(f'parent{parent_index}_id', 'NA'),
+ 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS),
+ 'filter_date': row.get('filter_date'), 'Month': row.get('Month'),
+ 'start_date': row.get('start_date'), 'timestamp': row.get('timestamp'),
+ 'Projects': [{'Project': row.get('Project'),
+ 'Actual': round(row.get('Actual'), self.DEFAULT_ROUND_DIGITS),
+ 'ProjectId': row.get('ProjectId')}],
+ 'index_id': f"{row.get('start_date')}-{row.get(f'parent{parent_index}', 'NA').lower()}",
+ 'total_folders': parent_index}
+ compress_gcp_data[index] = project_cost_data
+ return self.__second_layer_filter(items=list(compress_gcp_data.values()))
+
+ @typechecked()
+ @logger_time_stamp
+ def __second_layer_filter(self, items: list):
+ """
+ This method aggregates the results which have the same Account name
+ :param items:
+ :return:
+ """
+ filtered_result = {}
+ for item in items:
+ account = item.get('Account')
+ month = item.get('Month')
+ index = f'{account}-{month}'
+ if index in filtered_result:
+ filtered_result[index]['Budget'] += item.get('Budget')
+ filtered_result[index]['Actual'] += item.get('Actual')
+ if item.get('BudgetId') != filtered_result[index]['BudgetId']:
+ filtered_result[index]['AllocatedBudget'] += item.get('AllocatedBudget')
+ filtered_result[index]['Projects'].extend(item.get('Projects'))
+ else:
+ filtered_result[index] = item
+ return list(filtered_result.values())
+
+ # @Todo Add forecast values in future
+ @typechecked()
+ @logger_time_stamp
+ def __forecast_for_next_months(self, cost_data: list):
+ """
+ This method returns the forecast data for the next twelve months
+ :param cost_data:
+ :return:
+ """
+ forecast_cost_data = []
+ month_years = self.__next_twelve_months()
+ month = (datetime.now().month - 1) % 12
+ if month == 0:
+ month = 12
+ if len(str(month)) == 1:
+ month = f'0{month}'
+ year = datetime.now().year
+ cache_start_date = f'{year}-{str(month)}-01'
+ for data in cost_data:
+ if cache_start_date == data.get('start_date') and data.get('CostCenter') > 0:
+ for m_y in month_years:
+ m, y = m_y[0], m_y[1]
+ start_date = f'{y}-{m}-01'
+ timestamp = datetime.strptime(start_date, "%Y-%m-%d")
+ index_id = f'{start_date}-{data.get("Account").lower()}'
+ month = datetime.strftime(timestamp, "%Y %b")
+ projects = []
+ for project in data.get('Projects'):
+ project['Actual'] = 0
+ projects.append(project)
+ forecast_cost_data.append({
+ **data,
+ 'Actual': 0,
+ 'start_date': start_date,
+ 'timestamp': timestamp,
+ 'index_id': index_id,
+ 'Projects': projects,
+ 'filter_date': f'{start_date}-{month.split()[-1]}',
+ 'Month': month}
+ )
+ return forecast_cost_data
+
+ @typechecked()
+ @logger_time_stamp
+ def __get_aggregated_folder_details(self, query_data: list):
+ """
+ This method gives the unique folder_names from the data
+ :param query_data:
+ :return:
+ """
+ project_folders = {}
+ for data in query_data:
+ index = f'{data.get("number")}'
+ month = data.get('month')
+ project_folder_id = data.get('folder_id')
+ insert_data = False
+ if index not in project_folders:
+ insert_data = True
+ else:
+ insert_data = month >= project_folders.get(index).get('month')
+ if insert_data:
+ updated_data = {'month': month, 'folder_id': project_folder_id}
+ for folders in literal_eval(data.get('project_folders')):
+ folder_id = folders.get('resource_name').split('/')[-1]
+ folder_name = folders.get('display_name')
+ updated_data[folder_id] = folder_name
+ project_folders[index] = updated_data
+ return project_folders
+
+ @typechecked()
+ @logger_time_stamp
+ def __get_parent_folders(self, folder_ids: list, folders_data: dict, project_id: str):
+ """
+ This method returns the list of parent folders of Project
+ :param folder_ids:
+ :param folders_data:
+ :param project_id:
+ :return:
+ """
+ parent_folders = {}
+ for idx, _id in enumerate(folder_ids):
+ parent_folders.update({
+ f'parent{idx + 1}': folders_data[project_id].get(_id),
+ f'parent{idx + 1}_id': _id
+ })
+ return parent_folders
+
+ @logger_time_stamp
+ def __get_big_query_data(self):
+ """
+ This method collects the data from BigQuery and filters it
+ :return:
+ """
+ cost_usage_queries = self.__prepare_usage_query()
+ query_rows = self.__gcp_account.query_list(cost_usage_queries)
+ folders_data = self.__get_aggregated_folder_details(query_rows[1])
+ agg_data = {}
+ for cst_row in query_rows[0]:
+ project_id, bill_month, total_cost = cst_row.get('project_id').strip(), cst_row.get('month'), float(cst_row.get('total_cost'))
+ folder_ids = folders_data.get(project_id).get('folder_id').split('/')[2:-1] if folders_data.get(project_id) else cst_row.get('folder_id').split('/')[2:-1]
+ folder_name = cst_row.get('folder_name')
+ index = f"{project_id}-{bill_month}"
+ parents_folders = self.__get_parent_folders(folder_ids, folders_data, project_id) if project_id in folders_data else {}
+ if agg_data.get(index):
+ total_cost = float(cst_row.get('total_cost')) + agg_data[index]['Actual']
+ agg_data[index] = {
+ 'folder_name': parents_folders.get(f'parent{len(folder_ids)}', 'NA'),
+ 'start_date': f'{bill_month[:4]}-{bill_month[4:]}-01',
+ 'Project': cst_row.get('project_name'), 'ProjectId': project_id, 'Actual': total_cost,
+ 'Account': parents_folders.get('parent1', 'NA'), 'Forecast': 0, 'folder_ids': folder_ids, **parents_folders
+ }
+ agg_data[index]['timestamp'] = datetime.strptime(agg_data[index]['start_date'], '%Y-%m-%d')
+ month = datetime.strftime(agg_data[index]['timestamp'], "%Y %b")
+ agg_data[index]['Month'] = month
+ agg_data[index]['filter_date'] = f'{agg_data[index]["start_date"]}-{month.split()[-1]}'
+ return self.__organized_results(list(agg_data.values()))
+
+ @logger_time_stamp
+ def __get_cost_and_upload(self):
+ """
+ This method collects the cost and uploads it to ElasticSearch
+ :return:
+ """
+ collected_data = self.__get_big_query_data()
+ forecast_data = self.__forecast_for_next_months(cost_data=collected_data)
+ upload_data = collected_data + forecast_data
+ self.elastic_upload.es_upload_data(items=upload_data, set_index='index_id')
+
+ @logger_time_stamp
+ def run(self):
+ """
+ This method runs the GCP cost billing report methods
+ :return:
+ """
+ self.__get_cost_and_upload()
diff --git a/cloud_governance/policy/ibm/cost_billing_reports.py b/cloud_governance/policy/ibm/cost_billing_reports.py
index 5e130ad9..efbfe29a 100644
--- a/cloud_governance/policy/ibm/cost_billing_reports.py
+++ b/cloud_governance/policy/ibm/cost_billing_reports.py
@@ -23,8 +23,7 @@ def __init__(self):
self.__ibm_account = IBMAccount()
self.__elastic_upload = ElasticUpload()
self.update_to_gsheet = UploadToGsheet()
- self.owner = self.__environment_variables_dict.get('COST_CENTER_OWNER')
- self.cost_center, self.__account_budget, self.__years = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__ibm_account.short_account_id)
+ self.cost_center, self.__account_budget, self.__years, self.__owner = self.update_to_gsheet.get_cost_center_budget_details(account_id=self.__ibm_account.short_account_id)
def prepare_es_data(self, month: str, year: str, usage_cost: float = 0, next_invoice: float = 0):
"""This method prepares the data to upload to the es"""
@@ -42,7 +41,7 @@ def prepare_es_data(self, month: str, year: str, usage_cost: float = 0, next_inv
'Month': month,
'CostCenter': self.cost_center,
'CloudName': 'IBM Cloud',
- 'Owner': self.owner,
+ 'Owner': self.__owner,
'Forecast': round(next_invoice, 3),
'Actual': round(usage_cost, 3),
'filter_date': f'{start_date}-{month.split()[-1]}',
@@ -66,10 +65,17 @@ def get_cost_usage_details(self):
if past_usage_cost:
es_data = self.prepare_es_data(usage_cost=round(past_usage_cost.get('resources').get('billable_cost'), 3), month=str(last_month), year=str(last_month_year))
upload_es_data[es_data['index_id']] = es_data
- for next_month in range(month+1, month+11):
- new_year = date + relativedelta(month=next_month)
- es_data = self.prepare_es_data(month=new_year.strftime("%m"), year=str(new_year.year))
- upload_es_data[es_data['index_id']] = es_data
+ for next_month in range(self.MONTHS):
+ next_month = (next_month + month) % self.MONTHS
+ if next_month != month:
+ c_year = year
+ if len(str(next_month)) != 2:
+ next_month = f'0{next_month}'
+ if next_month == '00':
+ year += 1
+ next_month = str(12)
+ es_data = self.prepare_es_data(month=str(next_month), year=str(c_year))
+ upload_es_data[es_data['index_id']] = es_data
if upload_es_data:
self.__elastic_upload.es_upload_data(items=list(upload_es_data.values()), set_index='index_id')
return list(upload_es_data.values())
diff --git a/cloud_governance/policy/ibm/ibm_cost_report.py b/cloud_governance/policy/ibm/ibm_cost_report.py
index 0af571e7..d0f5bd34 100644
--- a/cloud_governance/policy/ibm/ibm_cost_report.py
+++ b/cloud_governance/policy/ibm/ibm_cost_report.py
@@ -27,7 +27,7 @@ def __init__(self):
@typechecked
def collect_tags_from_machines(self, tags: list):
"""
- This method return tags from list of string tags
+ This method returns tags from list of string tags
@param tags:
@return:
"""
diff --git a/cloud_governance/policy/ibm/tag_vm.py b/cloud_governance/policy/ibm/tag_vm.py
index 3462e1fd..b66b9987 100644
--- a/cloud_governance/policy/ibm/tag_vm.py
+++ b/cloud_governance/policy/ibm/tag_vm.py
@@ -13,7 +13,7 @@ def __init__(self):
def get_virtual_machine_username(self, vm_id: str):
"""
- This method return the virtual machine username from the billing order lists
+ This method returns the virtual machine username from the billing order lists
@param vm_id:
@return:
"""
diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py
index a23fb720..51baf46d 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_cluster/remove_cluster_tags.py
@@ -66,7 +66,7 @@ def remove_instance_tags(self, instance_list: list, tags: list):
def get_cluster(self, clusters: list):
"""
- This method return cluster, and it tags
+ This method returns a cluster and its tags
@param clusters:
@return:
"""
@@ -205,7 +205,7 @@ def cluster_images(self, instance_tags: dict):
def cluster_snapshot(self, instance_tags: dict):
"""
- This method return list of cluster's snapshot according to cluster tag name
+ This method returns list of cluster's snapshot according to cluster tag name
@return:
"""
snapshots_data = self.ec2_operations.get_snapshots()
@@ -458,7 +458,7 @@ def cluster_role(self, instance_tags: dict):
def cluster_user(self, instance_tags: dict):
"""
- This method return list of cluster's user according to cluster name
+ This method returns list of cluster's user according to cluster name
@param instance_tags:
@return:
"""
diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py
index 2bd5fd97..16a74638 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_operations.py
@@ -1,3 +1,5 @@
+from datetime import datetime
+
import boto3
from cloud_governance.common.clouds.aws.cloudtrail.cloudtrail_operations import CloudTrailOperations
@@ -11,7 +13,7 @@ class TagClusterOperations:
This class tags AWS resources
"""
- def __init__(self, input_tags: dict, cluster_name: str, cluster_prefix: str, region: str, dry_run: str, cluster_only: bool):
+ def __init__(self, region: str, input_tags: dict = None, cluster_name: str = None, cluster_prefix: str = None, dry_run: str = None, cluster_only: bool = None):
self.cluster_only = cluster_only
self.cluster_prefix = cluster_prefix
self.utils = Utils(region=region)
@@ -27,6 +29,7 @@ def __init__(self, input_tags: dict, cluster_name: str, cluster_prefix: str, r
self.cloudtrail = CloudTrailOperations(region_name='us-east-1')
self._get_username_from_instance_id_and_time = CloudTrailOperations(region_name=region).get_username_by_instance_id_and_time
self.dry_run = dry_run
+ self.iam_users = self.iam_operations.get_iam_users_list()
def _input_tags_list_builder(self):
"""
@@ -67,3 +70,29 @@ def _fill_na_tags(self, user: str = None):
else:
tags.append({'Key': key, 'Value': value})
return tags
+
+ def get_user_name_from_name_tag(self, tags: list):
+ """
+ This method returns the username from the name tag verified with iam users
+ :param tags:
+ :return:
+ """
+ user_name = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='User')
+ if user_name in self.iam_users:
+ return user_name
+ else:
+ name_tag = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='Name')
+ for user in self.iam_users:
+ if user in name_tag:
+ return user
+ return None
+
+ def get_username(self, start_time: datetime, resource_id: str, resource_type: str, tags: list):
+ """
+ This method returns the username
+ :return:
+ """
+ iam_username = self.get_user_name_from_name_tag(tags=tags)
+ if not iam_username:
+ return self._get_username_from_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type)
+ return iam_username
diff --git a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py
index 5b9fa619..50f69510 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_cluster/tag_cluster_resouces.py
@@ -12,6 +12,7 @@ class TagClusterResources(TagClusterOperations):
"""
SHORT_ID = 5
+ NA_VALUE = 'NA'
def __init__(self, cluster_name: str = None, cluster_prefix: str = None, input_tags: dict = None,
region: str = 'us-east-2', dry_run: str = 'yes', cluster_only: bool = False):
@@ -107,7 +108,7 @@ def __remove_tags_start_with_aws(self, tags: list):
def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_resource_id: str,
tags: str = 'Tags'):
"""
- This method return resource list that related to input resource id according to cluster's tag name and update the tags
+ This method returns the resource list that is related to the input resource id according to cluster's tag name and updates the tags
@param resources_list:
@param input_resource_id:
@param ids:
@@ -141,10 +142,7 @@ def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_r
for cluster_name, cluster_id in cluster_ids.items():
if self.dry_run == 'no':
try:
- if self.cluster_name in cluster_name:
- self.ec2_client.create_tags(Resources=cluster_id, Tags=cluster_tags.get(cluster_name))
- else:
- self.ec2_client.create_tags(Resources=cluster_id, Tags=cluster_tags.get(cluster_name))
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=cluster_id, tags=cluster_tags.get(cluster_name))
logger.info(f'{input_resource_id}:: {cluster_name}, count: {len(cluster_id)}, {cluster_id}, {cluster_tags.get(cluster_name)}')
except Exception as err:
logger.info(err)
@@ -153,7 +151,7 @@ def __generate_cluster_resources_list_by_tag(self, resources_list: list, input_r
def __generate_cluster_resources_list_by_vpc(self, resources_list: list, input_resource_id: str):
"""
- This method return resource list that related to input resource id according to cluster's vpc id
+ This method returns the resource list that is related to the input resource id according to cluster's vpc id
@param resources_list:
@param input_resource_id:
@return:
@@ -174,12 +172,12 @@ def __generate_cluster_resources_list_by_vpc(self, resources_list: list, input_r
if self.cluster_name:
if self.cluster_name in cluster_tag[0].get('Key'):
if self.dry_run == 'no':
- self.ec2_client.create_tags(Resources=[resource_id], Tags=all_tags)
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[resource_id], tags=all_tags)
logger.info(all_tags)
result_resources_list.append(resource_id)
else:
if self.dry_run == 'no':
- self.ec2_client.create_tags(Resources=[resource_id], Tags=all_tags)
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[resource_id], tags=all_tags)
logger.info(all_tags)
result_resources_list.append(resource_id)
break
@@ -229,11 +227,14 @@ def __validate_existing_tag(self, tags: list):
@param tags:
@return:
"""
+ check_tags = ['User', 'Project', 'Manager', 'Owner', 'Email']
+ tag_count = 0
for tag in tags:
- for key, value in self.input_tags.items():
- if tag.get('Key') == key:
- return True
- return False
+ if tag.get('Key') in check_tags:
+ tag_count += 1
+ if tag.get('Value') == 'NA':
+ return False
+ return tag_count == len(check_tags)
def update_cluster_tags(self, resources: list):
"""
@@ -248,24 +249,22 @@ def update_cluster_tags(self, resources: list):
for instance in resources:
for item in instance:
instance_id = item['InstanceId']
- if item.get('Tags'):
+ tags = item.get('Tags')
+ if tags:
# search that not exist permanent tags in the resource
- if not self.__validate_existing_tag(item.get('Tags')):
- for tag in item['Tags']:
+ if not self.__validate_existing_tag(tags):
+ for tag in tags:
if self.cluster_prefix in tag.get('Key'):
add_tags = self.__append_input_tags()
cluster_name = tag.get('Key').split('/')[-1]
- if cluster_name in cluster_instances:
- add_tags = self.__filter_resource_tags_by_add_tags(tags=item.get('Tags'),
- search_tags=cluster_tags[
- cluster_name])
+ user = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='User')
+ if cluster_name in cluster_instances and user and user != 'NA':
+ add_tags = self.__filter_resource_tags_by_add_tags(tags=tags, search_tags=cluster_tags[cluster_name])
if add_tags:
cluster_instances[cluster_name].append(instance_id)
break
else:
- username = self._get_username_from_instance_id_and_time(
- start_time=item.get('LaunchTime'), resource_id=instance_id,
- resource_type='AWS::EC2::Instance')
+ username = self.get_username(start_time=item.get('LaunchTime'), resource_id=instance_id, resource_type='AWS::EC2::Instance', tags=tags)
if username:
if username == 'AutoScaling':
add_tags.extend(self._fill_na_tags(user=username))
@@ -275,10 +274,14 @@ def update_cluster_tags(self, resources: list):
if not self.__check_user_in_username_tags(user_tags):
try:
user = self.iam_client.get_user(UserName=username)['User']
- username = self.cloudtrail.get_username_by_instance_id_and_time(
+ temp_username = self.cloudtrail.get_username_by_instance_id_and_time(
start_time=user.get('CreateDate'), resource_id=username,
resource_type='AWS::IAM::User')
- user_tags = self.iam_operations.get_user_tags(username=username)
+ if temp_username:
+ add_tags.append({'Key': 'User', 'Value': temp_username})
+ user_tags = self.iam_operations.get_user_tags(username=temp_username)
+ else:
+ add_tags.append({'Key': 'User', 'Value': username})
except:
add_tags.append({'Key': 'User', 'Value': username})
if user_tags:
@@ -294,17 +297,13 @@ def update_cluster_tags(self, resources: list):
add_tags = self.__filter_resource_tags_by_add_tags(tags=item.get('Tags'),
search_tags=add_tags)
if add_tags:
- cluster_instances[cluster_name] = [instance_id]
+ cluster_instances.setdefault(cluster_name, []).append(instance_id)
cluster_tags[cluster_name] = add_tags
break
for cluster_instance_name, instance_ids in cluster_instances.items():
if self.dry_run == 'no':
try:
- if self.cluster_name:
- if cluster_instance_name == self.cluster_name:
- self.ec2_client.create_tags(Resources=instance_ids, Tags=cluster_tags.get(cluster_instance_name))
- else:
- self.ec2_client.create_tags(Resources=instance_ids, Tags=cluster_tags.get(cluster_instance_name))
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=instance_ids, tags=cluster_tags.get(cluster_instance_name))
logger.info(f'Cluster :: {cluster_instance_name} count: {len(instance_ids)} :: InstanceId :: {instance_ids} :: {cluster_tags.get(cluster_instance_name)}')
except Exception as err:
logger.info(err)
@@ -318,7 +317,7 @@ def update_cluster_tags(self, resources: list):
def cluster_instance(self):
"""
- This method return list of cluster's instance according to cluster tag name,
+ This method returns list of cluster's instance according to cluster tag name,
The instances list is different from other resources
it will search for full cluster name (including random suffix string) in case of user input cluster name was given
@return:
@@ -339,7 +338,7 @@ def cluster_instance(self):
def cluster_volume(self):
"""
- This method return list of cluster's volume according to cluster tag name
+ This method returns list of cluster's volume according to cluster tag name
@return:
"""
volumes_data = self.ec2_operations.get_volumes()
@@ -353,7 +352,7 @@ def cluster_volume(self):
def cluster_ami(self):
"""
- This method return list of cluster's ami according to cluster tag name
+ This method returns list of cluster's ami according to cluster tag name
@return:
"""
images_data = self.ec2_operations.get_images()
@@ -367,7 +366,7 @@ def cluster_ami(self):
def cluster_snapshot(self):
"""
- This method return list of cluster's snapshot according to cluster tag name
+ This method returns list of cluster's snapshot according to cluster tag name
@return:
"""
snapshots_data = self.ec2_operations.get_snapshots()
@@ -381,14 +380,14 @@ def cluster_snapshot(self):
def __get_security_group_data(self):
"""
- This method return security group data
+ This method returns security group data
@return:
"""
return self.ec2_operations.get_security_groups()
def cluster_security_group(self):
"""
- This method return list of cluster's security group according to cluster tag name
+ This method returns list of cluster's security group according to cluster tag name
@return:
"""
security_group_ids = self.__generate_cluster_resources_list_by_tag(
@@ -398,7 +397,7 @@ def cluster_security_group(self):
def cluster_elastic_ip(self):
"""
- This method return list of cluster's elastic ip according to cluster tag name
+ This method returns list of cluster's elastic ip according to cluster tag name
@return:
"""
elastic_ips_data = self.ec2_operations.get_elastic_ips()
@@ -409,7 +408,7 @@ def cluster_elastic_ip(self):
def cluster_network_interface(self):
"""
- This method return list of cluster's network interface according to cluster tag name
+ This method returns list of cluster's network interface according to cluster tag name
@return:
"""
network_interfaces_data = self.ec2_operations.get_network_interface()
@@ -422,7 +421,7 @@ def cluster_network_interface(self):
def cluster_load_balancer(self):
"""
- This method return list of cluster's load balancer according to cluster vpc
+ This method returns list of cluster's load balancer according to cluster vpc
@return:
"""
result_resources_list = []
@@ -470,7 +469,7 @@ def cluster_load_balancer(self):
def cluster_load_balancer_v2(self):
"""
- This method return list of cluster's load balancer according to cluster vpc
+ This method returns list of cluster's load balancer according to cluster vpc
@return:
"""
result_resources_list = []
@@ -518,7 +517,7 @@ def cluster_load_balancer_v2(self):
def cluster_vpc(self):
"""
- This method return list of cluster's vpc according to cluster tag name
+ This method returns list of cluster's vpc according to cluster tag name
@return:
"""
vpcs_data = self.ec2_operations.get_vpcs()
@@ -545,7 +544,7 @@ def get_cluster_vpc(self):
def cluster_subnet(self):
"""
- This method return list of cluster's subnet according to cluster tag name
+ This method returns list of cluster's subnet according to cluster tag name
@return:
"""
subnets_data = self.ec2_operations.get_subnets()
@@ -556,7 +555,7 @@ def cluster_subnet(self):
def cluster_route_table(self):
"""
- This method return list of cluster's route table according to cluster tag name
+ This method returns list of cluster's route table according to cluster tag name
@return:
"""
route_tables_data = self.ec2_operations.get_route_tables()
@@ -567,7 +566,7 @@ def cluster_route_table(self):
def cluster_internet_gateway(self):
"""
- This method return list of cluster's route table internet gateway according to cluster tag name
+ This method returns list of cluster's route table internet gateway according to cluster tag name
@return:
"""
internet_gateways_data = self.ec2_operations.get_internet_gateways()
@@ -579,7 +578,7 @@ def cluster_internet_gateway(self):
def cluster_dhcp_option(self):
"""
- This method return list of cluster's dhcp option according to cluster tag name
+ This method returns list of cluster's dhcp option according to cluster tag name
@return:
"""
dhcp_options_data = self.ec2_operations.get_dhcp_options()
@@ -590,7 +589,7 @@ def cluster_dhcp_option(self):
def cluster_vpc_endpoint(self):
"""
- This method return list of cluster's vpc endpoint according to cluster tag name
+ This method returns list of cluster's vpc endpoint according to cluster tag name
@return:
"""
vpc_endpoints_data = self.ec2_operations.get_vpce()
@@ -601,7 +600,7 @@ def cluster_vpc_endpoint(self):
def cluster_nat_gateway(self):
"""
- This method return list of cluster's nat gateway according to cluster tag name
+ This method returns list of cluster's nat gateway according to cluster tag name
@return:
"""
nat_gateways_data = self.ec2_operations.get_nat_gateways()
@@ -612,7 +611,7 @@ def cluster_nat_gateway(self):
def cluster_network_acl(self):
"""
- This method return list of cluster's network acl according to cluster vpc id
+ This method returns list of cluster's network acl according to cluster vpc id
Missing OpenShift Tags for it based on VPCs
@return:
"""
@@ -624,7 +623,7 @@ def cluster_network_acl(self):
def cluster_role(self, cluster_names: list = []):
"""
- This method return list of cluster's role according to cluster name
+ This method returns list of cluster's role according to cluster name
@param cluster_names:
@return:
"""
@@ -674,7 +673,7 @@ def cluster_role(self, cluster_names: list = []):
def cluster_user(self, cluster_names: list = []):
"""
- This method return list of cluster's user according to cluster name
+ This method returns list of cluster's user according to cluster name
@param cluster_names:
@return:
"""
@@ -730,13 +729,22 @@ def __filter_resource_tags_by_add_tags(self, tags: list, search_tags: list):
for search_tag in search_tags:
found = False
for tag in tags:
- if tag.get('Key') == search_tag.get('Key'):
+ if tag.get('Key') == search_tag.get('Key') and tag.get('Value') != 'NA':
found = True
+ break
if not found:
add_tags.append(search_tag)
else:
add_tags.extend(search_tags)
- return add_tags
+ filter_tags = {}
+ for tag in add_tags:
+ key = tag.get('Key')
+ value = tag.get('Value')
+ if key in filter_tags and filter_tags[key].get('Value') == self.NA_VALUE:
+ filter_tags[key] = {'Key': key, 'Value': value}
+ else:
+ filter_tags[key] = {'Key': key, 'Value': value}
+ return list(filter_tags.values())
def __remove_launchTime(self, tags: list):
"""
@@ -748,7 +756,7 @@ def __remove_launchTime(self, tags: list):
def cluster_s3_bucket(self, cluster_names: list = []):
"""
- This method return list of cluster's s3 bucket according to cluster name
+ This method returns list of cluster's s3 bucket according to cluster name
@param cluster_names:
@return:
"""
diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py
index 8db6b2eb..f7b3519d 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/non_cluster_operations.py
@@ -10,6 +10,8 @@
class NonClusterOperations:
+ NA_VALUE = 'NA'
+
def __init__(self, region: str = 'us-east-2', dry_run: str = 'yes', input_tags: dict = ''):
self.region = region
self.dry_run = dry_run
@@ -21,6 +23,7 @@ def __init__(self, region: str = 'us-east-2', dry_run: str = 'yes', input_tags:
self.iam_client = IAMOperations()
self.ec2_operations = EC2Operations(region=region)
self.utils = Utils(region=region)
+ self.iam_users = self.iam_client.get_iam_users_list()
def _get_instances_data(self, instance_id: str = ''):
"""
@@ -63,13 +66,21 @@ def _get_tags_of_resources(self, tags: list, search_tags: list):
for search_tag in search_tags:
found = False
for tag in tags:
- if tag.get('Key') == search_tag.get('Key'):
+ if tag.get('Key') == search_tag.get('Key') and tag.get('Value') != 'NA':
found = True
if not found:
add_tags.append(search_tag)
else:
add_tags.extend(search_tags)
- return add_tags
+ filter_tags = {}
+ for tag in add_tags:
+ key = tag.get('Key')
+ value = tag.get('Value')
+ if key in filter_tags and filter_tags[key].get('Value') == self.NA_VALUE:
+ filter_tags[key] = {'Key': key, 'Value': value}
+ else:
+ filter_tags[key] = {'Key': key, 'Value': value}
+ return list(filter_tags.values())
def _fill_na_tags(self, user: str = None):
"""
@@ -89,15 +100,15 @@ def _fill_na_tags(self, user: str = None):
tags.append({'Key': key, 'Value': value})
return tags
- def _get_username_from_cloudtrail(self, start_time: datetime, resource_id: str, resource_type: str):
+ def _get_username_from_cloudtrail(self, start_time: datetime, resource_id: str, resource_type: str, end_time: datetime = None):
"""
- This method return username fom cloudtrail
+ This method returns username from cloudtrail
@param start_time:
@param resource_id:
@param resource_type:
@return:
"""
- return self.cloudtrail.get_username_by_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type)
+ return self.cloudtrail.get_username_by_instance_id_and_time(start_time=start_time, resource_id=resource_id, resource_type=resource_type, end_time=end_time)
def _get_resource_data(self, resource_method: callable):
"""
@@ -121,7 +132,7 @@ def _convert_datetime_format(self, date_time: datetime):
def _build_tag(self, key: str, value: any):
"""
- This method return Key value pair
+ This method returns Key value pair
@param key:
@param value:
@return:
@@ -152,7 +163,7 @@ def _get_tags_from_instance_item(self, instance_item: dict):
def _get_tags_fom_attachments(self, attachments: list):
"""
- This method return tags from attachments
+ This method returns tags from attachments
@param attachments:
@return:
"""
@@ -183,4 +194,43 @@ def _get_tags_from_snapshot_description_images(self, description: str):
username = self._get_username_from_cloudtrail(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami')
return tags, username
+ def get_user_name_from_name_tag(self, tags: list = None, resource_name: str = None):
+ """
+ This method returns the username from the name tag verified with iam users
+ :param resource_name:
+ :param tags:
+ :return:
+ """
+ name_tag = self.ec2_operations.get_tag_value_from_tags(tags=tags, tag_name='Name') if tags else resource_name
+ for user in self.iam_users:
+ if user in name_tag:
+ return user
+ return None
+
+ def get_username(self, start_time: datetime, resource_id: str, resource_type: str, tags: list, resource_name: str = '', end_time: datetime = None):
+ """
+ This method returns the username
+ :return:
+ """
+ iam_username = self.get_user_name_from_name_tag(tags=tags, resource_name=resource_name)
+ if not iam_username:
+ iam_username = self.get_user_name_from_name_tag(resource_name=resource_name)
+ if not iam_username:
+ return self._get_username_from_cloudtrail(start_time=start_time, resource_id=resource_id, resource_type=resource_type, end_time=end_time)
+ return iam_username
+ def validate_existing_tag(self, tags: list):
+ """
+ This method validates that permanent tag exists in tags list
+ @param tags:
+ @return:
+ """
+ check_tags = ['User', 'Project', 'Manager', 'Owner', 'Email']
+ tag_count = 0
+ if tags:
+ for tag in tags:
+ if tag.get('Key') in check_tags:
+ tag_count += 1
+ if tag.get('Value') == 'NA':
+ return False
+ return tag_count == len(check_tags)
diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py
index 69b97b5d..0b14d684 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/tag_non_cluster_resources.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta
from cloud_governance.common.logger.init_logger import logger
from cloud_governance.policy.policy_operations.aws.tag_non_cluster.non_cluster_operations import NonClusterOperations
@@ -36,7 +36,7 @@ def __get_instance_tags(self, launch_time: datetime, instance_id: str, tags: lis
@param tags:
@return:
"""
- username = self._get_username_from_cloudtrail(start_time=launch_time, resource_id=instance_id, resource_type='AWS::EC2::Instance')
+ username = self.get_username(start_time=launch_time, resource_id=instance_id, resource_type='AWS::EC2::Instance', tags=tags)
search_tags = []
user_tags = []
if not username:
@@ -67,15 +67,17 @@ def non_cluster_update_ec2(self, instances_list: list = None):
for item in instance:
instance_id = item.get('InstanceId')
launch_time = item.get('LaunchTime')
- add_tags = self.__get_instance_tags(launch_time=launch_time, instance_id=instance_id, tags=item.get('Tags'))
- if add_tags:
- if self.dry_run == 'no':
- try:
- self.ec2_client.create_tags(Resources=[instance_id], Tags=add_tags)
- logger.info(f'Added tags to instance: {instance_id} total: {len(add_tags)} tags: {add_tags}')
- except Exception as err:
- logger.info(err)
- instances_ids.append(instance_id)
+ tags = item.get('Tags')
+ if not self.validate_existing_tag(tags=tags):
+ add_tags = self.__get_instance_tags(launch_time=launch_time, instance_id=instance_id, tags=tags)
+ if add_tags:
+ if self.dry_run == 'no':
+ try:
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[instance_id], tags=add_tags)
+ logger.info(f'Added tags to instance: {instance_id} total: {len(add_tags)} tags: {add_tags}')
+ except Exception as err:
+ logger.info(err)
+ instances_ids.append(instance_id)
logger.info(f'non_cluster_ec2 count: {len(sorted(instances_ids))} {sorted(instances_ids)}')
return sorted(instances_ids)
@@ -90,37 +92,39 @@ def update_volumes(self, volumes_data: list = None):
volume_ids = []
for volume in volumes_data:
volume_id = volume.get('VolumeId')
- username = self._get_username_from_cloudtrail(start_time=volume.get('CreateTime'), resource_id=volume_id, resource_type='AWS::EC2::Volume')
- search_tags = []
- if not username:
- get_tags, username = self._get_tags_fom_attachments(attachments=volume.get('Attachments'))
- search_tags.extend(get_tags)
- else:
- search_tags.extend(self._append_input_tags())
- if username:
- user_tags = self.iam_client.get_user_tags(username=username)
- if not user_tags:
- search_tags.extend(self._fill_na_tags(user=username))
+ tags = volume.get('Tags')
+ if not self.validate_existing_tag(tags=tags):
+ username = self.get_username(start_time=volume.get('CreateTime'), resource_id=volume_id, resource_type='AWS::EC2::Volume', tags=tags)
+ search_tags = []
+ if not username:
+ get_tags, username = self._get_tags_fom_attachments(attachments=volume.get('Attachments'))
+ search_tags.extend(get_tags)
else:
- search_tags.extend(user_tags)
- search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
- search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime')))
- else:
- search_tags.extend(self._fill_na_tags())
- search_tags.extend(self._append_input_tags())
- search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime')))
- if not self.__check_name_in_tags(volume.get('Tags')):
- tag_name = f'{username}-{volume_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{volume_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{volume_id[-self.SHORT_RESOURCE_ID:]}'
- search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
- volume_tags = self._get_tags_of_resources(tags=volume.get('Tags'), search_tags=search_tags)
- if volume_tags:
- if self.dry_run == 'no':
- try:
- self.ec2_client.create_tags(Resources=[volume_id], Tags=volume_tags)
- logger.info(f'added tags to volume_id: {volume_id} total: {len(volume_tags)} tags: {volume_tags}')
- except Exception as err:
- logger.info(err)
- volume_ids.append(volume_id)
+ search_tags.extend(self._append_input_tags())
+ if username:
+ user_tags = self.iam_client.get_user_tags(username=username)
+ if not user_tags:
+ search_tags.extend(self._fill_na_tags(user=username))
+ else:
+ search_tags.extend(user_tags)
+ search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
+ search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime')))
+ else:
+ search_tags.extend(self._fill_na_tags())
+ search_tags.extend(self._append_input_tags())
+ search_tags.append(self._build_tag(key='LaunchTime', value=volume.get('CreateTime')))
+ if not self.__check_name_in_tags(volume.get('Tags')):
+ tag_name = f'{username}-{volume_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{volume_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{volume_id[-self.SHORT_RESOURCE_ID:]}'
+ search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
+ volume_tags = self._get_tags_of_resources(tags=volume.get('Tags'), search_tags=search_tags)
+ if volume_tags:
+ if self.dry_run == 'no':
+ try:
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[volume_id], tags=volume_tags)
+ logger.info(f'added tags to volume_id: {volume_id} total: {len(volume_tags)} tags: {volume_tags}')
+ except Exception as err:
+ logger.info(err)
+ volume_ids.append(volume_id)
logger.info(f'non_cluster_volumes count: {len(sorted(volume_ids))} {sorted(volume_ids)}')
return sorted(volume_ids)
@@ -135,43 +139,51 @@ def update_snapshots(self, snapshots: list = None):
snapshot_ids = []
for snapshot in snapshots:
snapshot_id = snapshot.get('SnapshotId')
- username = self._get_username_from_cloudtrail(start_time=snapshot.get('StartTime'), resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot')
- search_tags = []
- if not username:
- if snapshot.get('Description') and 'Created' in snapshot.get('Description'):
- image_tags, username = self._get_tags_from_snapshot_description_images(description=snapshot.get('Description'))
- if not username:
- instance_id = snapshot.get('Description').split(" ")[2].split("(")[1][:-1]
- instances = self._get_instances_data(instance_id)
- if instances:
- for item in instances:
- if item.get('InstanceId') == instance_id:
- item_tags, username = self._get_tags_from_instance_item(instance_item=item)
- else:
- search_tags.extend(self._append_input_tags())
- if username:
- user_tags = self.iam_client.get_user_tags(username=username)
- search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
- if not user_tags:
- search_tags.extend(self._fill_na_tags(user=username))
+ tags = snapshot.get('Tags')
+ if not self.validate_existing_tag(tags=tags):
+ username = self.get_username(start_time=snapshot.get('StartTime'), resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot', tags=tags)
+ if 'vm_import_image' in username:
+ start_time = snapshot.get('StartTime') + timedelta(seconds=5)
+ end_time = start_time + timedelta(minutes=30)
+ assume_username = self.get_username(start_time=start_time, resource_id=snapshot_id, resource_type='AWS::EC2::Snapshot', tags=tags, end_time=end_time)
+ if assume_username:
+ username = assume_username
+ search_tags = []
+ if not username:
+ if snapshot.get('Description') and 'Created' in snapshot.get('Description'):
+ image_tags, username = self._get_tags_from_snapshot_description_images(description=snapshot.get('Description'))
+ if not username:
+ instance_id = snapshot.get('Description').split(" ")[2].split("(")[1][:-1]
+ instances = self._get_instances_data(instance_id)
+ if instances:
+ for item in instances:
+ if item.get('InstanceId') == instance_id:
+ item_tags, username = self._get_tags_from_instance_item(instance_item=item)
else:
- search_tags.extend(user_tags)
- else:
- search_tags.extend(self._fill_na_tags())
- search_tags.extend(self._append_input_tags())
- if not self.__check_name_in_tags(snapshot.get('Tags')):
- tag_name = f'{username}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{snapshot_id[:self.SHOT_SNAPSHOT_ID]}-{self.region}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}'
- search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
- search_tags.append(self._build_tag(key='LaunchTime', value=snapshot.get('StartTime')))
- snapshot_tags = self._get_tags_of_resources(tags=snapshot.get('Tags'), search_tags=search_tags)
- if snapshot_tags:
- if self.dry_run == 'no':
- try:
- self.ec2_client.create_tags(Resources=[snapshot_id], Tags=snapshot_tags)
- logger.info(f'added tags to snapshots: {snapshot_id} total: {len(snapshot_tags)} tags: {snapshot_tags}')
- except Exception as err:
- logger.info(err)
- snapshot_ids.append(snapshot_id)
+ search_tags.extend(self._append_input_tags())
+ if username:
+ user_tags = self.iam_client.get_user_tags(username=username)
+ search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
+ if not user_tags:
+ search_tags.extend(self._fill_na_tags(user=username))
+ else:
+ search_tags.extend(user_tags)
+ else:
+ search_tags.extend(self._fill_na_tags())
+ search_tags.extend(self._append_input_tags())
+ if not self.__check_name_in_tags(snapshot.get('Tags')):
+ tag_name = f'{username}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{snapshot_id[:self.SHOT_SNAPSHOT_ID]}-{self.region}-{snapshot_id[-self.SHORT_RESOURCE_ID:]}'
+ search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
+ search_tags.append(self._build_tag(key='LaunchTime', value=snapshot.get('StartTime')))
+ snapshot_tags = self._get_tags_of_resources(tags=snapshot.get('Tags'), search_tags=search_tags)
+ if snapshot_tags:
+ if self.dry_run == 'no':
+ try:
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[snapshot_id], tags=snapshot_tags)
+ logger.info(f'added tags to snapshots: {snapshot_id} total: {len(snapshot_tags)} tags: {snapshot_tags}')
+ except Exception as err:
+ logger.info(err)
+ snapshot_ids.append(snapshot_id)
logger.info(f'non_cluster_snapshot count: {len(sorted(snapshot_ids))} {sorted(snapshot_ids)}')
return sorted(snapshot_ids)
@@ -187,31 +199,34 @@ def update_ami(self, images: list = None):
image_ids = []
for image in images:
image_id = image.get('ImageId')
+ tags = image.get('Tags')
+ image_name = image.get('Name')
start_time = datetime.fromisoformat(image.get('CreationDate')[:-1] + '+00:00')
- username = self._get_username_from_cloudtrail(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami')
- search_tags = []
- search_tags.extend(self._append_input_tags())
- if username:
- user_tags = self.iam_client.get_user_tags(username=username)
- search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
- if not user_tags:
- search_tags.extend(self._fill_na_tags(user=username))
+ if not self.validate_existing_tag(tags=tags):
+ username = self.get_username(start_time=start_time, resource_id=image_id, resource_type='AWS::EC2::Ami', tags=tags, resource_name=image_name)
+ search_tags = []
+ search_tags.extend(self._append_input_tags())
+ if username:
+ user_tags = self.iam_client.get_user_tags(username=username)
+ search_tags.append({'Key': 'Email', 'Value': f'{username}@redhat.com'})
+ if not user_tags:
+ search_tags.extend(self._fill_na_tags(user=username))
+ else:
+ search_tags.extend(user_tags)
else:
- search_tags.extend(user_tags)
- else:
- search_tags.extend(self._fill_na_tags())
- if not self.__check_name_in_tags(image.get('Tags')):
- tag_name = f'{username}-{image_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{image_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{image_id[-self.SHORT_RESOURCE_ID:]}'
- search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
- search_tags.append(self._build_tag(key='LaunchTime', value=start_time))
- image_tags = self._get_tags_of_resources(tags=image.get('Tags'), search_tags=search_tags)
- if image_tags:
- if self.dry_run == 'no':
- try:
- self.ec2_client.create_tags(Resources=[image_id], Tags=image_tags)
- logger.info(f'added tags to image: {image_id} total: {len(image_tags)} tags: {image_tags}')
- except Exception as err:
- logger.info(err)
- image_ids.append(image_id)
+ search_tags.extend(self._fill_na_tags())
+ if not self.__check_name_in_tags(image.get('Tags')):
+ tag_name = f'{username}-{image_id[-self.SHORT_RESOURCE_ID:]}' if username else f'{image_id[:self.SHORT_RESOURCE_NAME]}-{self.region}-{image_id[-self.SHORT_RESOURCE_ID:]}'
+ search_tags.append({'Key': 'cg-Name', 'Value': tag_name})
+ search_tags.append(self._build_tag(key='LaunchTime', value=start_time))
+ image_tags = self._get_tags_of_resources(tags=image.get('Tags'), search_tags=search_tags)
+ if image_tags:
+ if self.dry_run == 'no':
+ try:
+ self.utils.tag_aws_resources(client_method=self.ec2_client.create_tags, resource_ids=[image_id], tags=image_tags)
+ logger.info(f'added tags to image: {image_id} total: {len(image_tags)} tags: {image_tags}')
+ except Exception as err:
+ logger.info(err)
+ image_ids.append(image_id)
logger.info(f'non_cluster_amis count: {len(sorted(image_ids))} {sorted(image_ids)}')
return sorted(image_ids)
diff --git a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py
index 0ef1f548..8a037eca 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_non_cluster/update_na_tag_resources.py
@@ -46,7 +46,7 @@ def __get_resource_ids(self, resource_id: str, resource_name: str):
def __get_key_value(self, key, value):
"""
- This method return key-value pairs
+ This method returns key-value pairs
:param key:
:param value:
:return:
diff --git a/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py b/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py
index eb7229cf..e359c61a 100644
--- a/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py
+++ b/cloud_governance/policy/policy_operations/aws/tag_user/tag_iam_user.py
@@ -1,4 +1,5 @@
import csv
+import os.path
import re
from ast import literal_eval
@@ -77,6 +78,7 @@ def __write_into_csv_file(self, tag_keys: list, tag_values: dict):
else:
file.write(' , ')
file.write('\n')
+ logger.info(f'Generated the file: {self.file_name}')
def generate_user_csv(self):
"""
@@ -89,7 +91,7 @@ def generate_user_csv(self):
tag_values = {}
for user in users:
user_name = user.get('UserName')
- if '-' not in user_name:
+ if user_name.count('-') <= 3:
user_tags = self.IAMOperations.get_user_tags(username=user_name)
tag_values[user_name] = {}
for tag in user_tags:
@@ -105,8 +107,6 @@ def generate_user_csv(self):
break
tag_keys = list(sorted(tag_keys))
self.__write_into_csv_file(tag_keys=tag_keys, tag_values=tag_values)
- with open(self.file_name) as file:
- logger.info(file.read())
def __filter_tags_user_tags(self, user_tags: list, append_tags: list):
"""
@@ -121,8 +121,7 @@ def __filter_tags_user_tags(self, user_tags: list, append_tags: list):
found = False
for user_tag in user_tags:
if user_tag.get('Key').strip() == append_tag.get('Key').strip():
- if user_tag.get('Value').strip() == append_tag.get('Value').strip():
- found = True
+ found = True
if not found:
add_tags.append(append_tag)
else:
@@ -176,30 +175,55 @@ def update_user_tags(self):
"""
count = 0
updated_usernames = []
- with open(self.file_name) as file:
- csvreader = csv.reader(file)
- header = next(csvreader)
- rows = []
- for row in csvreader:
- rows.append(row)
- json_data = self.__get_json_data(header, rows)
- for key, tags in json_data.items():
- try:
- user_tags = self.IAMOperations.get_user_tags(username=key)
- tags.append({'Key': 'User', 'Value': key})
- tags.extend(self.get_user_details_from_ldap(user_name=key))
- filter_tags = self.__filter_tags_user_tags(user_tags, tags)
- if filter_tags:
- self.iam_client.tag_user(UserName=key, Tags=filter_tags)
- logger.info(f'Username :: {key} {filter_tags}')
+ if os.path.exists(self.file_name):
+ with open(self.file_name) as file:
+ csvreader = csv.reader(file)
+ header = next(csvreader)
+ rows = []
+ for row in csvreader:
+ rows.append(row)
+ json_data = self.__get_json_data(header, rows)
+ for key, tags in json_data.items():
+ if self.tag_iam_user_tags(username=key, tags=tags):
updated_usernames.append(key)
count += 1
- except Exception as err:
- logger.info(err)
+ else:
+ users_list = self.get_detail_resource_list(func_name=self.iam_client.list_users, input_tag='Users',
+ check_tag='Marker')
+ for user in users_list:
+ username = user.get('UserName')
+ if username.count('-') <= 3: # assume a username with more than 3 hyphens belongs to a cluster user
+ if self.tag_iam_user_tags(username=username):
+ updated_usernames.append(username)
+ count += 1
logger.info(f'Updated Tags of IAM Users = {count} :: Usernames {updated_usernames}')
return count
- def __format_tags(self, username: str, headers: list):
+ def tag_iam_user_tags(self, username: str, tags: list = None):
+ """
+ This method tags the IAM User tags
+ :param tags:
+ :param username:
+ :return:
+ """
+ try:
+ if not tags:
+ tags = []
+ user_tags = self.IAMOperations.get_user_tags(username=username)
+ tags.append({'Key': 'User', 'Value': username})
+ tags.extend(self.get_user_details_from_ldap(user_name=username))
+ filter_tags = self.__filter_tags_user_tags(user_tags, tags)
+ if filter_tags:
+ self.iam_client.tag_user(UserName=username, Tags=filter_tags)
+ logger.info(f'Username :: {username} {filter_tags}')
+ return True
+ except Exception as err:
+ logger.error(err)
+ return False
+
+ def __format_tags(self, username: str, headers: list = None):
+ if not headers:
+ headers = ['User']
tags = {'User': username}
user_tags = self.IAMOperations.get_user_tags(username=username)
for user_tag in user_tags:
@@ -212,19 +236,32 @@ def delete_update_user_from_doc(self):
This method removes IAM user if not in the IAM list
@return:
"""
- iam_file = pd.read_csv(self.file_name)
- iam_users = [user['UserName'] for user in self.IAMOperations.get_users()]
- csv_iam_users = list(iam_file['User'])
- for index, user in enumerate(csv_iam_users):
- if user not in iam_users:
- self.__google_drive_operations.delete_rows(spreadsheet_id=self.__SPREADSHEET_ID,
- sheet_name=self.__sheet_name, row_number=index + 1)
- logger.info(f'removed user {user}')
+ self.__google_drive_operations.create_work_sheet(gsheet_id=self.__SPREADSHEET_ID, sheet_name=self.__sheet_name)
+ iam_users = [user['UserName'] for user in
+ self.get_detail_resource_list(func_name=self.iam_client.list_users, input_tag='Users',
+ check_tag='Marker') if user['UserName'].count('-') <= 3]
+ csv_iam_users = []
+ iam_file = pd.DataFrame(columns=['User', "Project"])
+ if os.path.exists(self.file_name):
+ iam_file = pd.read_csv(self.file_name)
+ if not iam_file.empty:
+ csv_iam_users = list(iam_file['User'])
+ for index, user in enumerate(csv_iam_users):
+ if user not in iam_users:
+ self.__google_drive_operations.delete_rows(spreadsheet_id=self.__SPREADSHEET_ID,
+ sheet_name=self.__sheet_name, row_number=index + 1)
+ logger.info(f'removed user {user}')
+ else:
+ iam_file = pd.DataFrame(columns=['User'])
append_data = []
for user in iam_users:
- if '-' not in user:
+ if user.count('-') <= 3:
if user not in csv_iam_users:
- tags = self.__format_tags(username=user, headers=list(iam_file.columns))
+ if not iam_file.empty:
+ tags = self.__format_tags(username=user, headers=list(iam_file.columns))
+ else:
+ append_data.append(['User'])
+ tags = self.__format_tags(username=user)
df2 = pd.DataFrame.from_dict([tags])
iam_file = pd.concat([iam_file, df2], ignore_index=True)
iam_file = iam_file.fillna('')
diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py
index 895c41be..2c866520 100644
--- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py
+++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/delete_ec2_resources.py
@@ -165,8 +165,9 @@ def __delete_load_balancer_v2(self, resource_id: str):
@typeguard.typechecked
def __delete_volume(self, resource_id: str):
try:
- self.client.delete_volume(VolumeId=resource_id)
- logger.info(f'delete_volume: {resource_id}')
+ logger.info(f'Cluster volumes are handled by ebs_unattached')
+ # self.client.delete_volume(VolumeId=resource_id)
+ # logger.info(f'delete_volume: {resource_id}')
except Exception as err:
logger.exception(f'Cannot delete_volume: {resource_id}, {err}')
@@ -260,22 +261,31 @@ def __delete_security_group(self, resource_id: str, vpc_id: str):
security_groups = self.ec2_operations.get_security_groups()
vpc_security_groups = self.__get_cluster_references(resource_id=vpc_id, resource_list=security_groups, input_resource_id='VpcId', output_result='')
for vpc_security_group in vpc_security_groups:
- if vpc_security_group.get('GroupName') == 'default':
- if vpc_security_group.get('IpPermissions'):
- for ip_permission in vpc_security_group.get('IpPermissions'):
- self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ip_permission])
- logger.info(f'Removed the Ingress rules of Security Group {resource_id} :: {ip_permission}')
- else:
- if vpc_security_group.get('Tags'):
- if self.__is_cluster_resource(tags=vpc_security_group.get('Tags'), cluster_tag=self.cluster_tag):
- logger.info(vpc_security_group.get('GroupId'))
- if vpc_security_group.get('IpPermissions'):
- for ip_permission in vpc_security_group.get('IpPermissions'):
- if ip_permission.get('UserIdGroupPairs'):
- for user_id_group_pair in ip_permission.get('UserIdGroupPairs'):
- if user_id_group_pair.get('GroupId') == resource_id:
- self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ip_permission])
- logger.info(f'Removed the Ingress rules of Security Group {resource_id} from {vpc_security_group.get("GroupId")}')
+ if resource_id != vpc_security_group.get('GroupId'):
+ if vpc_security_group.get('GroupName') == 'default':
+ logger.info(f'Removing the {resource_id} ingress rule from Default Security Group: {vpc_security_group.get("GroupId")}')
+ if vpc_security_group.get('IpPermissions'):
+ for ip_permission in vpc_security_group.get('IpPermissions'):
+ if ip_permission.get('UserIdGroupPairs'):
+ for user_id_group_pair in ip_permission.get('UserIdGroupPairs'):
+ if user_id_group_pair.get('GroupId') == resource_id:
+ ingress_rule = {'FromPort': ip_permission.get('FromPort'), 'IpProtocol': ip_permission.get('IpProtocol'), 'IpRanges': ip_permission.get('IpRanges'), 'Ipv6Ranges': ip_permission.get('Ipv6Ranges'), 'PrefixListIds': ip_permission.get('PrefixListIds'), 'ToPort': ip_permission.get('ToPort'), 'UserIdGroupPairs': [user_id_group_pair]}
+ self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ingress_rule])
+ logger.info(f'Removed the Ingress rules of Security Group {vpc_security_group.get("GroupId")} :: {ingress_rule}')
+ else:
+ if vpc_security_group.get('Tags'):
+ if self.__is_cluster_resource(tags=vpc_security_group.get('Tags'), cluster_tag=self.cluster_tag):
+ logger.info(vpc_security_group.get('GroupId'))
+ if vpc_security_group.get('IpPermissions'):
+ for ip_permission in vpc_security_group.get('IpPermissions'):
+ if ip_permission.get('UserIdGroupPairs'):
+ for user_id_group_pair in ip_permission.get('UserIdGroupPairs'):
+ if user_id_group_pair.get('GroupId') == resource_id:
+ ingress_rule = {'FromPort': ip_permission.get('FromPort'), 'IpProtocol': ip_permission.get('IpProtocol'), 'IpRanges': ip_permission.get('IpRanges'),
+ 'Ipv6Ranges': ip_permission.get('Ipv6Ranges'), 'PrefixListIds': ip_permission.get('PrefixListIds'),
+ 'ToPort': ip_permission.get('ToPort'), 'UserIdGroupPairs': [user_id_group_pair]}
+ self.client.revoke_security_group_ingress(GroupId=vpc_security_group.get('GroupId'), IpPermissions=[ingress_rule])
+ logger.info(f'Removed the Ingress rules of Security Group {resource_id} from {ingress_rule}')
network_interfaces = self.ec2_operations.get_network_interface()
network_interface_ids = self.__get_cluster_references(resource_id=vpc_id, resource_list=network_interfaces,
input_resource_id='VpcId',
diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py
index af4d93dc..b7f2c35e 100644
--- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py
+++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/run_zombie_cluster_resources.py
@@ -82,7 +82,7 @@ def __get_resource_list(region, delete: bool = False, resource: str = '', cluste
def zombie_cluster_resource(delete: bool = False, region: str = 'us-east-2', resource: str = '', cluster_tag: str = '',
resource_name: str = '', service_type: str = ''):
"""
- This method return zombie cluster resources,
+ This method returns zombie cluster resources,
How its works? if not exist an instance cluster, the resource is zombie
if delete true it will delete the zombie resource
:return: list of zombie resources
diff --git a/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py b/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py
index 0eb4df60..3c545d4f 100644
--- a/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py
+++ b/cloud_governance/policy/policy_operations/aws/zombie_cluster/zombie_cluster_common_methods.py
@@ -15,7 +15,7 @@
class ZombieClusterCommonMethods:
DAYS_TO_TRIGGER_RESOURCE_MAIL = 4
- DAYS_TO_DELETE_RESOURCE = 7
+ DAYS_TO_DELETE_RESOURCE = environment_variables.environment_variables_dict.get('DAYS_TO_DELETE_RESOURCE')
def __init__(self, region: str, force_delete: bool = False):
self.__environment_variables_dict = environment_variables.environment_variables_dict
@@ -32,6 +32,7 @@ def __init__(self, region: str, force_delete: bool = False):
self.__ldap_host_name = self.__environment_variables_dict.get('LDAP_HOST_NAME', '')
self._special_user_mails = self.__environment_variables_dict.get('special_user_mails', '{}')
self._account_admin = self.__environment_variables_dict.get('account_admin', '')
+ self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT') if self.__environment_variables_dict.get('EMAIL_ALERT') else False
self._ldap = LdapSearch(ldap_host_name=self.__ldap_host_name)
self._mail = Postfix()
self._mail_description = MailMessage()
@@ -45,7 +46,7 @@ def _literal_eval(self, data: any):
def get_tag_name_from_tags(self, tags: list, tag_name: str):
"""
- This method return tag_name from resource_tags
+ This method returns tag_name from resource_tags
@param tags:
@param tag_name:
@return:
@@ -75,7 +76,7 @@ def get_zombie_cluster_user_tag(self, zombies: dict, resources: list, resource_i
def _get_tags_of_zombie_resources(self, resources: list, resource_id_name: str, zombies: dict, aws_service: str,
aws_tag: str = 'Tags'):
"""
- This method return tags of the resource i.e {resource_id: tags}
+ This method returns tags of the resource i.e {resource_id: tags}
@param resources:
@param tags:
@return:
@@ -177,7 +178,7 @@ def update_resource_tags(self, tags: list, tag_name: str, tag_value: str):
def get_cluster_delete_days(self, tags: list) -> int:
"""
- This method return the ClusterDeleteDays tag
+ This method returns the ClusterDeleteDays tag
@param tags:
@return:
"""
@@ -188,7 +189,6 @@ def get_cluster_delete_days(self, tags: list) -> int:
cluster_delete_days = int(cluster_delete_days) + 1
return cluster_delete_days
- @logger_time_stamp
def trigger_mail(self, tags: list, resource_id: str, days: int, resources: list, message_type: str):
"""
This method send triggering mail
@@ -254,7 +254,6 @@ def collect_notify_cluster_data(self, resource_data: dict, cluster_left_out_days
delete_data.setdefault(cluster_tag, []).append({func_name: delete_tag_data[cluster_tag]})
return notify_data, delete_data, cluster_data
- @logger_time_stamp
def send_mails_to_cluster_user(self, notify_data: dict, delete_data: dict, cluster_data: dict):
"""
This method send mail to the user to notify cluster status
@@ -263,17 +262,17 @@ def send_mails_to_cluster_user(self, notify_data: dict, delete_data: dict, clust
@param delete_data:
@return:
"""
- for cluster_tag, resource_ids in notify_data.items():
- self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag)
- self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag,
- days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL,
- resources=resource_ids, message_type='notification')
- for cluster_tag, resource_ids in delete_data.items():
- self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag)
- self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag,
- days=self.DAYS_TO_DELETE_RESOURCE, resources=resource_ids, message_type='delete')
+ if self.__email_alert:
+ for cluster_tag, resource_ids in notify_data.items():
+ self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag)
+ self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag,
+ days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL,
+ resources=resource_ids, message_type='notification')
+ for cluster_tag, resource_ids in delete_data.items():
+ self.update_resource_tags(tags=cluster_data[cluster_tag], tag_name='Name', tag_value=cluster_tag)
+ self.trigger_mail(tags=cluster_data[cluster_tag], resource_id=cluster_tag,
+ days=self.DAYS_TO_DELETE_RESOURCE, resources=resource_ids, message_type='delete')
- @logger_time_stamp
def _check_zombie_cluster_deleted_days(self, resources: dict, cluster_left_out_days: dict, zombie: str, cluster_tag: str):
"""
This method check the cluster delete days and return the clusters
diff --git a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py
index ae13efbf..c5f37a9d 100644
--- a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py
+++ b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/run_zombie_non_cluster_policies.py
@@ -9,6 +9,7 @@
from cloud_governance.common.clouds.aws.price.resources_pricing import ResourcesPricing
from cloud_governance.common.clouds.aws.s3.s3_operations import S3Operations
from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload
+from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations
from cloud_governance.common.ldap.ldap_search import LdapSearch
from cloud_governance.common.logger.init_logger import logger
from cloud_governance.common.mails.mail_message import MailMessage
@@ -19,7 +20,7 @@
class NonClusterZombiePolicy:
- DAYS_TO_DELETE_RESOURCE = 7
+ DAYS_TO_DELETE_RESOURCE = environment_variables.environment_variables_dict.get('DAYS_TO_DELETE_RESOURCE')
DAYS_TO_NOTIFY_ADMINS = 6
DAYS_TO_TRIGGER_RESOURCE_MAIL = 4
DAILY_HOURS = 24
@@ -48,9 +49,13 @@ def __init__(self):
self._mail_description = MailMessage()
self.__ldap_host_name = self.__environment_variables_dict.get('LDAP_HOST_NAME', '')
self._ldap = LdapSearch(ldap_host_name=self.__ldap_host_name)
+ self.__email_alert = self.__environment_variables_dict.get('EMAIL_ALERT') if self.__environment_variables_dict.get('EMAIL_ALERT') else False
+ self.__manager_email_alert = self.__environment_variables_dict.get('MANAGER_EMAIL_ALERT')
self._admins = ['athiruma@redhat.com', 'ebattat@redhat.com']
self._es_upload = ElasticUpload()
self.resource_pricing = ResourcesPricing()
+ self._es_operations = ElasticSearchOperations()
+ self._es_index = self.__environment_variables_dict.get('es_index')
def set_dryrun(self, value: str):
self._dry_run = value
@@ -95,7 +100,7 @@ def _get_tag_name_from_tags(self, tags: list, tag_name: str = 'Name'):
def _calculate_days(self, create_date: datetime):
"""
- This method return the days
+ This method returns the days
@return:
"""
today = datetime.date.today()
@@ -164,30 +169,32 @@ def _trigger_mail(self, tags: list, resource_id: str, days: int, resource_type:
@param resource_id:
@return:
"""
- try:
- special_user_mails = self._literal_eval(self._special_user_mails)
- user, resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_tag_name_from_tags(
- tags=tags, tag_name='Name')
- if not resource_name:
- resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='cg-Name')
- to = user if user not in special_user_mails else special_user_mails[user]
- ldap_data = self._ldap.get_user_details(user_name=to)
- cc = [self._account_admin, f'{ldap_data.get("managerId")}@redhat.com']
- name = to
- if ldap_data:
- name = ldap_data.get('displayName')
- subject, body = self._mail_description.resource_message(name=name, days=days,
- notification_days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL,
- delete_days=self.DAYS_TO_DELETE_RESOURCE,
- resource_name=resource_name, resource_id=resource_id,
- resource_type=resource_type, msgadmins=self.DAYS_TO_NOTIFY_ADMINS, extra_purse=kwargs.get('extra_purse'))
- if not kwargs.get('admins'):
- self._mail.send_email_postfix(to=to, content=body, subject=subject, cc=cc, resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0))
- else:
- kwargs['admins'].append(f'{ldap_data.get("managerId")}@redhat.com')
- self._mail.send_email_postfix(to=kwargs.get('admins'), content=body, subject=subject, cc=[], resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0))
- except Exception as err:
- logger.info(err)
+ if self.__email_alert:
+ try:
+ special_user_mails = self._literal_eval(self._special_user_mails)
+ user, resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='User'), self._get_tag_name_from_tags(
+ tags=tags, tag_name='Name')
+ if not resource_name:
+ resource_name = self._get_tag_name_from_tags(tags=tags, tag_name='cg-Name')
+ to = user if user not in special_user_mails else special_user_mails[user]
+ ldap_data = self._ldap.get_user_details(user_name=to)
+ cc = [self._account_admin, f'{ldap_data.get("managerId")}@redhat.com'] if self.__manager_email_alert else []
+ name = to
+ if ldap_data:
+ name = ldap_data.get('displayName')
+ subject, body = self._mail_description.resource_message(name=name, days=days,
+ notification_days=self.DAYS_TO_TRIGGER_RESOURCE_MAIL,
+ delete_days=self.DAYS_TO_DELETE_RESOURCE,
+ resource_name=resource_name, resource_id=resource_id,
+ resource_type=resource_type, msgadmins=self.DAYS_TO_NOTIFY_ADMINS, extra_purse=kwargs.get('extra_purse'))
+ if not kwargs.get('admins'):
+ self._mail.send_email_postfix(to=to, content=body, subject=subject, cc=cc, resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0))
+ else:
+ if self.__manager_email_alert:
+ kwargs['admins'].append(f'{ldap_data.get("managerId")}@redhat.com')
+ self._mail.send_email_postfix(to=kwargs.get('admins'), content=body, subject=subject, cc=[], resource_id=resource_id, message_type=kwargs.get('message_type'), extra_purse=kwargs.get('delta_cost', 0))
+ except Exception as err:
+ logger.info(err)
def _update_tag_value(self, tags: list, tag_name: str, tag_value: str):
"""
@@ -231,7 +238,7 @@ def __delete_resource_on_name(self, resource_id: str):
self._ec2_client.delete_volume(VolumeId=resource_id)
elif self._policy == 'ip_unattached':
self._ec2_client.release_address(AllocationId=resource_id)
- elif self._policy == 'nat_gateway_unused':
+ elif self._policy == 'unused_nat_gateway':
self._ec2_client.delete_nat_gateway(NatGatewayId=resource_id)
elif self._policy == 'zombie_snapshots':
self._ec2_client.delete_snapshot(SnapshotId=resource_id)
@@ -287,7 +294,7 @@ def _update_resource_tags(self, resource_id: str, left_out_days: int, tags: list
self._s3_client.put_bucket_tagging(Bucket=resource_id, Tagging={'TagSet': tags})
elif self._policy == 'empty_roles':
self._iam_client.tag_role(RoleName=resource_id, Tags=tags)
- elif self._policy in ('ip_unattached', 'nat_gateway_unused', 'zombie_snapshots'):
+ elif self._policy in ('ip_unattached', 'unused_nat_gateway', 'zombie_snapshots'):
self._ec2_client.create_tags(Resources=[resource_id], Tags=tags)
except Exception as err:
logger.info(f'Exception raised: {err}: {resource_id}')
diff --git a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py
index cc3bcdf0..2e22b291 100644
--- a/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py
+++ b/cloud_governance/policy/policy_operations/aws/zombie_non_cluster/zombie_non_cluster_polices.py
@@ -1,5 +1,6 @@
import importlib
import inspect
+from datetime import datetime
from cloud_governance.common.logger.init_logger import logger
from cloud_governance.policy.policy_operations.aws.zombie_non_cluster.run_zombie_non_cluster_policies import NonClusterZombiePolicy
@@ -26,9 +27,22 @@ def run(self):
else:
logger.info(f'key: {cls[0]}, count: {len(response)}, {response}')
policy_result = response
+
+ if self._es_operations.check_elastic_search_connection():
+ if policy_result:
+ for policy_dict in policy_result:
+ policy_dict['region_name'] = self._region
+ policy_dict['account'] = self._account
+ self._es_operations.upload_to_elasticsearch(data=policy_dict, index=self._es_index)
+ logger.info(f'Uploaded the policy results to elasticsearch index: {self._es_index}')
+ else:
+ logger.error(f'No data to upload on @{self._account} at {datetime.utcnow()}')
+ else:
+ logger.error('ElasticSearch host is not pingable, Please check ')
+
if self._policy_output:
- if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run'):
- beautify_data = self._beautify_upload_data(upload_resource_data=response)
- policy_result = {'count': len(beautify_data), self._policy: beautify_data}
+ # if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run', 's3_inactive', 'zombie_snapshots', 'nat_gateway_unused'):
+ # beautify_data = self._beautify_upload_data(upload_resource_data=response)
+ # policy_result = {'count': len(beautify_data), self._policy: beautify_data}
logger.info(policy_result)
self._s3operations.save_results_to_s3(policy=self._policy.replace('_', '-'), policy_output=self._policy_output, policy_result=policy_result)
diff --git a/cloud_governance/policy/policy_operations/gcp/__init__.py b/cloud_governance/policy/policy_operations/gcp/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py b/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py
new file mode 100644
index 00000000..01675d33
--- /dev/null
+++ b/cloud_governance/policy/policy_operations/gcp/gcp_policy_runner.py
@@ -0,0 +1,29 @@
+import importlib
+import inspect
+
+from cloud_governance.common.jira.jira import logger
+from cloud_governance.main.environment_variables import environment_variables
+
+
+class GcpPolicyRunner:
+ """
+ This class runs the GCP policies
+ """
+
+ def __init__(self):
+ self.__environment_variables_dict = environment_variables.environment_variables_dict
+ self._policy = self.__environment_variables_dict.get('policy')
+ self._cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME')
+
+ def run(self):
+ """
+ Run the GCP policies
+ @return:
+ """
+ azure_policies = importlib.import_module(f'cloud_governance.policy.gcp.{self._policy}')
+ logger.info(f'Account: {self._cloud_name}, Policy: {self._policy}')
+ for cls in inspect.getmembers(azure_policies, inspect.isclass):
+ if self._policy.replace('_', '') == cls[0].lower():
+ response = cls[1]().run()
+ if isinstance(response, list) and len(response) > 0:
+ logger.info(f'key: {cls[0]}, count: {len(response)}, {response}')
diff --git a/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py b/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py
index 31cf3670..a559fac4 100644
--- a/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py
+++ b/cloud_governance/policy/policy_operations/gitleaks/gitleaks.py
@@ -39,7 +39,7 @@ def __delete_gitleaks_report(self):
def __get_gitleaks_report(self):
"""
- This method return dict report content
+ This method returns dict report content
"""
report_file = self.__report_file_full_path
if os.path.isfile(report_file):
diff --git a/cloud_governance/policy/policy_runners/__init__.py b/cloud_governance/policy/policy_runners/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloud_governance/policy/policy_runners/common_policy_runner.py b/cloud_governance/policy/policy_runners/common_policy_runner.py
new file mode 100644
index 00000000..4f54fd7a
--- /dev/null
+++ b/cloud_governance/policy/policy_runners/common_policy_runner.py
@@ -0,0 +1,29 @@
+import importlib
+import inspect
+
+from cloud_governance.common.logger.init_logger import logger
+from cloud_governance.main.environment_variables import environment_variables
+
+
+class CommonPolicyRunner:
+ """
+ This class runs the Common policies
+ """
+
+ def __init__(self):
+ self.__environment_variables_dict = environment_variables.environment_variables_dict
+ self._policy = self.__environment_variables_dict.get('policy')
+ self._cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME')
+
+ def run(self):
+ """
+ Run the Common policies
+ @return:
+ """
+ azure_policies = importlib.import_module(f'cloud_governance.policy.common_policies.{self._policy}')
+ logger.info(f'Account: {self._cloud_name}, Policy: {self._policy}, CLOUD_NAME: {self._cloud_name}')
+ for cls in inspect.getmembers(azure_policies, inspect.isclass):
+ if self._policy.replace('_', '') == cls[0].lower():
+ response = cls[1]().run()
+ if isinstance(response, list) and len(response) > 0:
+ logger.info(f'key: {cls[0]}, count: {len(response)}, {response}')
diff --git a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py
new file mode 100644
index 00000000..700f12fa
--- /dev/null
+++ b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instance_types_pricing.py
@@ -0,0 +1,74 @@
+import json
+import os
+
+import boto3
+from pkg_resources import resource_filename
+
+
+class InstanceTypes:
+
+ def __init__(self):
+ self.ec2_client = boto3.client('ec2', region_name='us-east-1')
+ self.__client = boto3.client('pricing', region_name='us-east-1')
+
+ def get_instance_types(self, region_name: str):
+        """This method fetches all instance types"""
+ instance_types = []
+ ec2_client = boto3.client('ec2', region_name=region_name)
+ response = ec2_client.describe_instance_types()
+ instance_types.extend([ins_type['InstanceType'] for ins_type in response.get('InstanceTypes')])
+ while 'NextToken' in response:
+ response = ec2_client.describe_instance_types(NextToken=response.get('NextToken'))
+ instance_types.extend([ins_type['InstanceType'] for ins_type in response.get('InstanceTypes')])
+ return sorted(instance_types)
+
+ def get_region_name(self, region_code):
+ """
+        This method returns the region name
+ @param region_code:
+ @return:
+ """
+ default_region = 'us-east-1'
+ endpoint_file = resource_filename('botocore', 'data/endpoints.json')
+ try:
+ with open(endpoint_file, 'r') as f:
+ data = json.load(f)
+ return data['partitions'][0]['regions'][region_code]['description']
+ except IOError:
+ return default_region
+
+ def instance_price(self, region_name: str, instance_type: str):
+        """This method returns the price of an instance type in a region"""
+ FLT = '[{{"Field": "tenancy", "Value": "shared", "Type": "TERM_MATCH"}},' \
+ '{{"Field": "operatingSystem", "Value": "Linux", "Type": "TERM_MATCH"}},' \
+ '{{"Field": "preInstalledSw", "Value": "NA", "Type": "TERM_MATCH"}},' \
+ '{{"Field": "instanceType", "Value": "{t}", "Type": "TERM_MATCH"}},' \
+ '{{"Field": "location", "Value": "{r}", "Type": "TERM_MATCH"}},' \
+ '{{"Field": "capacitystatus", "Value": "Used", "Type": "TERM_MATCH"}}]'
+ f = FLT.format(r=self.get_region_name(region_name), t=instance_type)
+ try:
+ data = self.__client.get_products(ServiceCode='AmazonEC2', Filters=json.loads(f))
+ od = json.loads(data['PriceList'][0])['terms']['OnDemand']
+ id1 = list(od)[0]
+ id2 = list(od[id1]['priceDimensions'])[0]
+ return od[id1]['priceDimensions'][id2]['pricePerUnit']['USD']
+ except Exception as err:
+ return 0
+
+ def instance_prices(self):
+ """This method get the instance prices based on instance_type"""
+ # regions = self.ec2_client.describe_regions()['Regions']
+ # aws_pricing = {}
+ # for region in regions:
+ region_pricing = {}
+ instance_types = self.get_instance_types(region_name='us-west-2')
+ for instance_type in instance_types:
+ price = self.instance_price(region_name='us-west-2', instance_type=instance_type)
+ if float(price) > 0:
+ region_pricing[instance_type] = round(float(price), 4)
+ # aws_pricing[region['RegionName']] = region_pricing
+ with open('instances_price.json', 'w') as file:
+ json.dump(region_pricing, file, indent=4)
+
+
+# InstanceTypes().instance_prices()
diff --git a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json
index 0e21982e..dc99790d 100644
--- a/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json
+++ b/clouds_data_ware_house/cloud_resource_orchestration/clouds/aws/instances_price.json
@@ -123,6 +123,7 @@
"c6in.4xlarge": 0.9072,
"c6in.8xlarge": 1.8144,
"c6in.large": 0.1134,
+ "c6in.metal": 7.2576,
"c6in.xlarge": 0.2268,
"c7g.12xlarge": 1.74,
"c7g.16xlarge": 2.32,
@@ -131,8 +132,8 @@
"c7g.8xlarge": 1.16,
"c7g.large": 0.0725,
"c7g.medium": 0.0363,
+ "c7g.metal": 2.32,
"c7g.xlarge": 0.145,
- "cc2.8xlarge": 2.0,
"d2.2xlarge": 1.38,
"d2.4xlarge": 2.76,
"d2.8xlarge": 5.52,
@@ -206,6 +207,12 @@
"i3en.large": 0.226,
"i3en.metal": 10.848,
"i3en.xlarge": 0.452,
+ "i4g.16xlarge": 4.9421,
+ "i4g.2xlarge": 0.6178,
+ "i4g.4xlarge": 1.2355,
+ "i4g.8xlarge": 2.471,
+ "i4g.large": 0.1544,
+ "i4g.xlarge": 0.3089,
"i4i.16xlarge": 5.491,
"i4i.2xlarge": 0.686,
"i4i.32xlarge": 10.9824,
@@ -363,6 +370,7 @@
"m6idn.4xlarge": 1.273,
"m6idn.8xlarge": 2.5459,
"m6idn.large": 0.1591,
+ "m6idn.metal": 10.1837,
"m6idn.xlarge": 0.3182,
"m6in.12xlarge": 3.3415,
"m6in.16xlarge": 4.4554,
@@ -372,7 +380,17 @@
"m6in.4xlarge": 1.1138,
"m6in.8xlarge": 2.2277,
"m6in.large": 0.1392,
+ "m6in.metal": 8.9107,
"m6in.xlarge": 0.2785,
+ "m7g.12xlarge": 1.9584,
+ "m7g.16xlarge": 2.6112,
+ "m7g.2xlarge": 0.3264,
+ "m7g.4xlarge": 0.6528,
+ "m7g.8xlarge": 1.3056,
+ "m7g.large": 0.0816,
+ "m7g.medium": 0.0408,
+ "m7g.metal": 2.6112,
+ "m7g.xlarge": 0.1632,
"p2.16xlarge": 14.4,
"p2.8xlarge": 7.2,
"p2.xlarge": 0.9,
@@ -510,6 +528,7 @@
"r6idn.4xlarge": 1.5631,
"r6idn.8xlarge": 3.1262,
"r6idn.large": 0.1954,
+ "r6idn.metal": 12.505,
"r6idn.xlarge": 0.3908,
"r6in.12xlarge": 4.1839,
"r6in.16xlarge": 5.5786,
@@ -519,7 +538,17 @@
"r6in.4xlarge": 1.3946,
"r6in.8xlarge": 2.7893,
"r6in.large": 0.1743,
+ "r6in.metal": 11.1571,
"r6in.xlarge": 0.3487,
+ "r7g.12xlarge": 2.5704,
+ "r7g.16xlarge": 3.4272,
+ "r7g.2xlarge": 0.4284,
+ "r7g.4xlarge": 0.8568,
+ "r7g.8xlarge": 1.7136,
+ "r7g.large": 0.1071,
+ "r7g.medium": 0.0536,
+ "r7g.metal": 3.4272,
+ "r7g.xlarge": 0.2142,
"t1.micro": 0.02,
"t2.2xlarge": 0.3712,
"t2.large": 0.0928,
@@ -551,6 +580,7 @@
"t4g.xlarge": 0.1344,
"trn1.2xlarge": 1.3438,
"trn1.32xlarge": 21.5,
+ "trn1n.32xlarge": 24.78,
"u-12tb1.112xlarge": 109.2,
"u-18tb1.112xlarge": 163.8,
"u-3tb1.56xlarge": 27.3,
@@ -602,4 +632,4 @@
"z1d.large": 0.186,
"z1d.metal": 4.464,
"z1d.xlarge": 0.372
-}
+}
\ No newline at end of file
diff --git a/cloudsensei/README.md b/cloudsensei/README.md
new file mode 100644
index 00000000..03dc3967
--- /dev/null
+++ b/cloudsensei/README.md
@@ -0,0 +1,88 @@
+## CloudSensei
+
+CloudSensei is an effort to uncover potential cloud resource leaks which might lead to inefficient cloud management. Currently, CloudSensei helps generate a daily report allowing stakeholders to action on long-running EC2 instances.
+
+#### To-Do:
+Eventually, CloudSensei will be flipped to work as a Slackbot, allowing users to join a “read-only” Slack channel to review daily expense reports for instances.
+
+### How it works?
+To implement this functionality, CloudSensei utilizes AWS Lambda + EventBridge.
+The EventBridge Scheduler (CronJob) will run every day at 17:00hrs IST.
+
+
+#### How to send Slack notifications on Slack?
+1. Create a new Slack bot on your slack workspace, add it to desired channel
+2. Generate [OAuth](https://api.slack.com/authentication/token-types#bot) Token for Slack Bot
+3. Use [Block Kit](https://api.slack.com/block-kit) to build message formats.
+4. Use Slack [postMessage API](https://api.slack.com/methods/chat.postMessage) to post messages to Slack channel
+
+#### Steps to create Slack bot:
+1. Go to [api.slack.com](https://api.slack.com/)
+2. Click on **Your apps** and click on **Manage your apps**.
+3. Click on **Create New App**.
+4. Select create from scratch.
+ 1. Enter necessary fields and create app
+5. A Basic information tab will open, select options **Bots**.
+6. Click on **OAuth & Permissions** on left panel
+ 1. Click on
+ 2. Under the scopes, add only **Bots Token Scopes**
+ 3. Under the **OAuth Tokens for Your Workspace**, Submit **Install to Workspace** and allow access.
+
+#### Adding Slack bot to your channel
+
+To Create a Lambda function & integrate with EventBridge you must export some env variables:
+
+Fill the env.txt file to export environment variables
+
+To store the data of long-running instances in elastic search
+export below variables
+```commandline
+ACCOUNT_ID=$
+AWS_DEFAULT_REGION=$
+RESOURCE_DAYS=$
+SEND_AGG_MAIL=$
+ES_SERVER=$
+```
+
+To send mail of the long-running instances
+export below variables
+```commandline
+ACCOUNT_ID=$
+AWS_DEFAULT_REGION=$
+RESOURCE_DAYS=$
+SES_HOST_ADDRESS=$
+SES_HOST_PORT=$
+SES_USER_ID=$
+SES_PASSWORD=$
+TO_ADDRESS=$
+CC_ADDRESS=$
+```
+
+To send Slack notifications in Slack channel
+export below variables
+```commandline
+ACCOUNT_ID=$
+AWS_DEFAULT_REGION=$
+RESOURCE_DAYS=$
+SLACK_API_TOKEN=$
+SLACK_CHANNEL_NAME=$
+```
+
+Note: Use env.txt to export the above variables
+
+```commandline
+git clone https://github.com/redhat-performance/cloud-governance
+cd cloud-governance/cloudsensei/
+./run.sh deploy
+# Copy the tfstate file backup; in case it is deleted, we cannot retrieve it
+```
+
+To delete the Lambda + Event_bridge service [ ** must have the tfstate file]
+```commandline
+cd cloudsensei
+./run.sh destroy
+```
+
+##### Limits of BlockKit
+
+1. Can only send 50 items per block.
diff --git a/cloudsensei/agg_lambda/lambda_function.py b/cloudsensei/agg_lambda/lambda_function.py
new file mode 100644
index 00000000..cfd96c5f
--- /dev/null
+++ b/cloudsensei/agg_lambda/lambda_function.py
@@ -0,0 +1,43 @@
+import json
+import logging
+import os
+from datetime import datetime
+from time import time
+
+from es_operations import ESOperations
+from send_email import send_email_with_ses
+
+
+def lambda_handler(event, context):
+ """
+ This lambda function sends notifications to slack on long running resources on AWS Cloud
+ :param event:
+ :param context:
+ :return:
+ """
+ start_time = time()
+ logging.info(f"{lambda_handler.__name__} started at {datetime.utcnow()}")
+ aws_accounts = ["perf-dept", "openshift-perfscale", "openshift-psap"]
+ code = 400
+ message = "Something went wrong check your es_data"
+ es_operations = ESOperations()
+ email_body = ""
+ subject = "Weekly Cloud Report: Long running instances in the Perf&Scale AWS Accounts"
+ for account in aws_accounts:
+ current_date = str(datetime.utcnow().date())
+ index_id = f"{account}-{current_date}"
+ es_data = es_operations.get_es_data_by_id(index_id)
+ if es_data:
+ email_body += f"Cloud Report: Long running instances in the @{account} account
"
+ email_body += es_data.get('_source').get('body')
+ email_body += "
"
+ response = send_email_with_ses(body=email_body, subject=subject, to=os.environ.get('TO_ADDRESS'), cc=os.environ.get('CC_ADDRESS'))
+ if response:
+ code = 200
+        message = "Successfully sent the emails"
+ end_time = time()
+ return {
+ 'statusCode': code,
+ 'body': json.dumps(message),
+ 'total_running_time': f"{end_time - start_time} s"
+ }
diff --git a/cloudsensei/agg_lambda/run.sh b/cloudsensei/agg_lambda/run.sh
new file mode 100755
index 00000000..81cbffbf
--- /dev/null
+++ b/cloudsensei/agg_lambda/run.sh
@@ -0,0 +1,22 @@
+PROJECT_NAME="AggFunction"
+SUCCESS_OUTPUT_PATH="/dev/null"
+ERROR_LOG="$(mktemp -d)/stderr.log"
+
+
+echo "Clearing if previously created zip file"
+PROJECT_PATH="$PWD/$PROJECT_NAME.zip"
+if [ -f $PROJECT_PATH ]; then
+ rm -rf $PROJECT_PATH
+ rm -rf ./package
+ echo "Deleted Previously created zip file"
+fi
+
+pip install --upgrade pip
+pip install --target ./package -r ../requirements.txt > $SUCCESS_OUTPUT_PATH
+pushd package
+zip -r ../$PROJECT_NAME.zip . > $SUCCESS_OUTPUT_PATH
+popd
+zip -g $PROJECT_NAME.zip lambda_function.py > $SUCCESS_OUTPUT_PATH
+zip -g $PROJECT_NAME.zip ../es_operations.py > $SUCCESS_OUTPUT_PATH
+zip -g $PROJECT_NAME.zip ../send_email.py > $SUCCESS_OUTPUT_PATH
+aws lambda update-function-code --function-name CloudSenseiAggFunction --zip-file fileb://$PROJECT_PATH --region $AWS_DEFAULT_REGION > $SUCCESS_OUTPUT_PATH
diff --git a/cloudsensei/email_template.j2 b/cloudsensei/email_template.j2
new file mode 100644
index 00000000..5c4019b9
--- /dev/null
+++ b/cloudsensei/email_template.j2
@@ -0,0 +1,26 @@
+{% set style="border-collapse:collapse;border:2px solid black;padding: 10px" %}
+
+
+
+
+ {% for keys in keys_list %}
+ {{keys}} |
+ {% endfor %}
+
+
+
+ {% for user, region_list in resources_list.items() %}
+ {% for region_name, resources_list in region_list.items() %}
+ {% for resources in resources_list %}
+ {% set _ = resources.update({'User': user, 'Region': region_name}) %}
+
+ {% for key in keys_list %}
+ {{resources[key]}} |
+ {% endfor %}
+
+ {% endfor %}
+ {% endfor %}
+ {% endfor %}
+
+
+
\ No newline at end of file
diff --git a/cloudsensei/env.sh b/cloudsensei/env.sh
new file mode 100644
index 00000000..47aca7ac
--- /dev/null
+++ b/cloudsensei/env.sh
@@ -0,0 +1,14 @@
+export ACCOUNT_ID=${ACCOUNT_ID:-""}
+export AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-"us-east-1"}
+export SEND_AGG_MAIL=${SEND_AGG_MAIL:-"yes"}
+export SLACK_API_TOKEN=${SLACK_API_TOKEN:-""}
+export SLACK_CHANNEL_NAME=${SLACK_CHANNEL_NAME:-""}
+export SES_HOST_ADDRESS=${SES_HOST_ADDRESS:-""}
+export SES_HOST_PORT=${SES_HOST_PORT:-0}
+export SES_USER_ID=${SES_USER_ID:-""}
+export SES_PASSWORD=${SES_PASSWORD:-""}
+export TO_ADDRESS=${TO_ADDRESS:-""}
+export CC_ADDRESS=${CC_ADDRESS:-""}
+export RESOURCE_DAYS=${RESOURCE_DAYS:-7}
+export ES_SERVER=${ES_SERVER:-""}
+export S3_BUCKET=${S3_BUCKET:-""}
diff --git a/cloudsensei/es_operations.py b/cloudsensei/es_operations.py
new file mode 100644
index 00000000..1407bb6f
--- /dev/null
+++ b/cloudsensei/es_operations.py
@@ -0,0 +1,45 @@
+import logging
+import os
+from datetime import datetime
+
+from elasticsearch import Elasticsearch
+
+
+class ESOperations:
+ """
+ This class performs es operations
+ """
+
+ ES_INDEX = "cloudsensei"
+ ES_DOC = '_doc'
+
+ def __init__(self):
+ self.__es_server = os.environ.get('ES_SERVER')
+ self.__es = Elasticsearch(self.__es_server)
+
+ def upload_to_es(self, data: dict, **kwargs):
+ """
+ This method uploads data to es
+ :return:
+ """
+ if not data.get('timestamp'):
+ data['timestamp'] = datetime.utcnow() # datetime.now()
+ # Upload data to elastic search server
+ try:
+ self.__es.index(index=self.ES_INDEX, doc_type=self.ES_DOC, body=data, **kwargs)
+ return True
+ except Exception as err:
+ raise err
+
+ def get_es_data_by_id(self, es_id: str):
+ """
+ This method fetch the data from the es based on the id
+ :param es_id:
+ :return:
+ """
+ try:
+ es_data = self.__es.get(index=self.ES_INDEX, id=es_id)
+ except Exception as err:
+ logging.error(err)
+ es_data = {}
+ return es_data
diff --git a/cloudsensei/lambda_function.py b/cloudsensei/lambda_function.py
new file mode 100644
index 00000000..0fda44f8
--- /dev/null
+++ b/cloudsensei/lambda_function.py
@@ -0,0 +1,212 @@
+import json
+import logging
+import os
+from time import time
+
+import boto3
+from datetime import datetime
+from jinja2 import Template
+
+from es_operations import ESOperations
+from send_email import send_email_with_ses
+from slack_operations import SlackOperations
+
+
+class EC2Operations:
+ """
+ This class performs the ec2 operations
+ """
+
+ SLACK_ITEM_SIZE = 50
+
+ def __init__(self):
+ self.__ec2_client = boto3.client('ec2', region_name='us-east-1')
+ self.__iam_client = boto3.client('iam')
+ self.__resource_days = int(os.environ.get('RESOURCE_DAYS', 7))
+
+ def set_ec2_client(self, region_name: str):
+ """
+        This method recreates the ec2_client for another region
+ :param region_name:
+ :return:
+ """
+ self.__ec2_client = boto3.client('ec2', region_name=region_name)
+
+ def __get_all_instances(self):
+ """
+ This method returns all instances in a region
+ :return:
+ """
+ resource_list = []
+ resources = self.__ec2_client.describe_instances()
+ resource_list.extend(resources['Reservations'])
+ while 'NextToken' in resources.keys():
+ resources = self.__ec2_client.describe_instances(NextToken=resources['NextToken'])
+ resource_list.extend(resources['Reservations'])
+ return resource_list
+
+ def get_resources(self):
+ """
+ This method returns all the instances running more than 7 days
+ :return:
+ """
+ regions = self.__ec2_client.describe_regions()['Regions']
+ current_datetime = datetime.utcnow().date()
+ long_running_instances_by_user = {}
+ for region in regions:
+ region_name = region['RegionName']
+ self.set_ec2_client(region_name)
+ instances_list = self.__get_all_instances()
+ for instances in instances_list:
+ for resource in instances['Instances']:
+ skip = False
+ launch_time = resource.get('LaunchTime').date()
+ days = (current_datetime - launch_time).days
+ if days > self.__resource_days:
+ user = name = None
+ for tag in resource.get('Tags', []):
+ tag_key = tag.get('Key').lower()
+ if tag_key.lower() == 'cloudsensei':
+ skip = True
+ break
+ if tag_key == 'user':
+ user = tag.get('Value')
+ elif tag_key == 'name':
+ name = tag.get('Value')
+ if not skip and user:
+ long_running_instances_by_user.setdefault(user.lower(), {}).setdefault(region_name, []).append(
+ {'InstanceId': resource.get('InstanceId'),
+ 'Name': name, 'LaunchDate': str(launch_time),
+ 'RunningDays': f"{days} days", 'State': resource.get('State', {}).get('Name')})
+ return long_running_instances_by_user
+
+ def get_account_alias_name(self):
+ """
+ This method returns the account alias name
+ :return:
+ """
+ response = self.__iam_client.list_account_aliases()
+ account_alias = response['AccountAliases'][0]
+ return account_alias
+
+ def organize_message_to_send_slack(self, resources_list: dict):
+ """
+ This method returns the message to send to slack
+ :param resources_list:
+ :return:
+ """
+
+ divider = {"type": "divider"}
+ keys_list = ['User', 'Region', 'Name', 'InstanceId', 'LaunchDate', 'RunningDays']
+ rows = []
+ for user, region_list in resources_list.items():
+ for region_name, resources_list in region_list.items():
+ for resources in resources_list:
+ if resources:
+ resources.update({'User': user, 'Region': region_name})
+ rows.append({
+ "type": "section",
+ "fields": [{"type": "mrkdwn", "text": f"{str(resources.get(item))}"} for item in keys_list],
+ })
+ rows.append(divider)
+ item_blocks = [rows[i:i + self.SLACK_ITEM_SIZE] for i in range(0, len(rows), self.SLACK_ITEM_SIZE)] # splitting because slack block allows only 50 items
+ slack_message_block = []
+ for block in item_blocks:
+ slack_message_block.append(block)
+ return slack_message_block
+
+ def organize_message_to_seng_mail(self, resources_list: dict):
+ """
+ This method returns the mail message
+ :param resources_list:
+ :return:
+ """
+ keys_list = ['User', 'Region', 'Name', 'InstanceId', 'LaunchDate', 'State', 'RunningDays']
+ with open('email_template.j2') as template:
+ template = Template(template.read())
+ body = template.render({'resources_list': resources_list, 'keys_list': keys_list})
+ return body
+
+
+class ProcessData:
+ def __init__(self, subject):
+ self.__subject = subject
+
+ def send_email(self, organized_ec2_data):
+ """
+ This method send email
+ :return:
+ """
+ response = send_email_with_ses(body=organized_ec2_data, subject=self.__subject, to=os.environ.get('TO_ADDRESS'),
+ cc=os.environ.get('CC_ADDRESS'))
+ if response:
+ return 200, "Successfully sent an emails"
+ return 400, 'Something went wrong'
+
+ def save_to_elastic_search(self, organized_ec2_data, account_name):
+ """
+ This method saves the data in elasticsearch
+ :return:
+ """
+ es_operations = ESOperations()
+ data = {
+ 'body': organized_ec2_data,
+ 'subject': self.__subject,
+ 'index_id': f"{account_name.lower()}-{str(datetime.utcnow().date())}"
+ }
+ if es_operations.upload_to_es(data=data, id=data.get('index_id')):
+            return 200, "Successfully saved data in elastic search"
+ return 400, 'Something went wrong'
+
+ def post_message_in_slack(self, slack_blocks, account_name):
+ """
+ This method posts message in slack
+ :return:
+ """
+ slack_operations = SlackOperations()
+ thread_ts = slack_operations.create_thread(account_name=account_name)
+ code = 400
+ message = 'Something went wrong, while posting to slack'
+ if thread_ts:
+ message = slack_operations.post_message_blocks_in_thread(message_blocks=slack_blocks, thread_ts=thread_ts)
+ code = 200
+ return code, message
+
+
+def lambda_handler(event, context):
+ """
+    This lambda function sends notifications to slack on long running resources on AWS Cloud
+ :param event:
+ :param context:
+ :return:
+ """
+ start_time = time()
+ logging.info(f"{lambda_handler.__name__} started at {datetime.utcnow()}")
+ code = 400
+ message = "Something went wrong while sending the Notification"
+ extra_message = ''
+ ec2_operations = EC2Operations()
+ account_name = ec2_operations.get_account_alias_name()
+ process_data = ProcessData(subject=f'Daily Cloud Report: Long running instances in the @{account_name} account')
+ if os.environ.get("SLACK_API_TOKEN"):
+ slack_blocks = ec2_operations.organize_message_to_send_slack(ec2_operations.get_resources())
+ code, message = process_data.post_message_in_slack(slack_blocks=slack_blocks, account_name=account_name)
+ else:
+ organized_ec2_data = ec2_operations.organize_message_to_seng_mail(ec2_operations.get_resources())
+ if os.environ.get("SEND_AGG_MAIL", "no").lower() == "yes":
+ code, message = process_data.save_to_elastic_search(organized_ec2_data, account_name=account_name)
+ if os.environ.get('SES_HOST_ADDRESS'):
+ code, message = process_data.send_email(organized_ec2_data)
+ elif os.environ.get('SES_HOST_ADDRESS'):
+ if os.environ.get('SES_HOST_ADDRESS'):
+ code, message = process_data.send_email(organized_ec2_data)
+ else:
+ organized_ec2_data = ec2_operations.get_resources()
+ message = organized_ec2_data
+ code = 200
+ end_time = time()
+ return {
+ 'statusCode': code,
+ 'body': json.dumps({'message': message, 'extra_message': extra_message}),
+ 'total_running_time': f"{end_time - start_time} s"
+ }
diff --git a/cloudsensei/requirements.txt b/cloudsensei/requirements.txt
new file mode 100644
index 00000000..1b22ecc6
--- /dev/null
+++ b/cloudsensei/requirements.txt
@@ -0,0 +1,4 @@
+boto3==1.26.1
+requests==2.31.0
+jinja2==3.1.2
+elasticsearch==7.11.0
diff --git a/cloudsensei/run.sh b/cloudsensei/run.sh
new file mode 100755
index 00000000..b0b0e1c9
--- /dev/null
+++ b/cloudsensei/run.sh
@@ -0,0 +1,78 @@
+PROJECT_NAME="CloudSensei"
+SUCCESS_OUTPUT_PATH="/dev/null"
+ERROR_LOG="$(mktemp -d)/stderr.log"
+
+source ./env.sh
+
+
+action="$1"
+
+if [ -d "./terraform/.terraform" ]; then
+ echo "Deleting the existing .terraform folder"
+ rm -rf "./terraform/.terraform"
+fi
+
+if [ "$action" = "deploy" ]; then
+ echo "Clearing if previously created zip file"
+ PROJECT_PATH="$PWD/$PROJECT_NAME.zip"
+ if [ -f $PROJECT_PATH ]; then
+ rm -rf $PROJECT_PATH
+ rm -rf ./package
+ echo "Deleted Previously created zip file"
+ fi
+
+ pip install --upgrade pip
+ pip install --target ./package -r ./requirements.txt > $SUCCESS_OUTPUT_PATH
+ pushd ./package
+ zip -r ../$PROJECT_NAME.zip . > $SUCCESS_OUTPUT_PATH
+ popd
+ zip -g $PROJECT_NAME.zip lambda_function.py > $SUCCESS_OUTPUT_PATH
+ zip -g $PROJECT_NAME.zip email_template.j2 > $SUCCESS_OUTPUT_PATH
+ zip -g $PROJECT_NAME.zip slack_operations.py > $SUCCESS_OUTPUT_PATH
+ zip -g $PROJECT_NAME.zip es_operations.py > $SUCCESS_OUTPUT_PATH
+ zip -g $PROJECT_NAME.zip send_email.py > $SUCCESS_OUTPUT_PATH
+
+ pushd ./terraform
+ echo "#############################"
+ echo "Creating the lambda lambda_function using terraform"
+ if [ -n "$ACCOUNT_ID" ]; then
+ echo "Generating jinja files and tfvars file"
+ python ./Template.py
+ echo "Completed Generating tfvars file and jinja file"
+ if command -v terraform; then
+ if [ -s "$ERROR_LOG" ]; then
+ rm -f "$ERROR_LOG"
+ echo "Removed the stderr file if present"
+ fi
+ terraform init 1> $SUCCESS_OUTPUT_PATH
+ terraform state pull
+ terraform apply -var-file="./input_vars.tfvars" -auto-approve 2> "$ERROR_LOG"
+ if [[ -s "$ERROR_LOG" ]]; then
+ cat $ERROR_LOG
+ terraform destroy -var-file="./input_vars.tfvars" -auto-approve
+ echo "Validate your credentials/ Check the output"
+ else
+ echo "Successfully Created the lambda lambda_function"
+ fi
+ else
+      echo "Please install terraform on your local machine"
+ fi
+ else
+ echo "AWS ACCOUNT_ID is missing, please export the variable"
+ fi
+ echo "#############################"
+ popd
+else
+ pushd ./terraform
+ if [ "$action" = "destroy" ]; then
+ echo "Generating jinja files and tfvars file"
+ python ./Template.py
+ echo "Completed Generating tfvars file and jinja file"
+ terraform init 1> $SUCCESS_OUTPUT_PATH
+ terraform state pull
+ terraform destroy -var-file="./input_vars.tfvars" -auto-approve
+ else
+ echo "Invalid argument passed, supported only deploy, destroy"
+ fi
+ popd
+fi
diff --git a/cloudsensei/send_email.py b/cloudsensei/send_email.py
new file mode 100644
index 00000000..3363aa73
--- /dev/null
+++ b/cloudsensei/send_email.py
@@ -0,0 +1,42 @@
+import logging
+import ssl
+import os
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from smtplib import SMTP
+
+
+def send_email_with_ses(to: any, body: str, subject: str, cc: any = None):
+ """
+ This method sends the mail
+ :param subject:
+ :param to:
+ :param body:
+ :param cc:
+ :return:
+ """
+ host = os.environ.get("SES_HOST_ADDRESS", '')
+ port = int(os.environ.get("SES_HOST_PORT", 0))
+ user = os.environ.get("SES_USER_ID", )
+ password = os.environ.get("SES_PASSWORD", '')
+ if host and port and user and password:
+ context = ssl.create_default_context()
+ msg = MIMEMultipart('alternative')
+ msg["Subject"] = subject
+ msg["From"] = "noreply@aws.rhperfscale.org"
+ msg["To"] = ", ".join(to) if type(to) == list else to
+ if cc:
+ msg["Cc"] = ",".join(cc) if type(cc) == list else cc
+ msg.attach(MIMEText(body, 'html'))
+ try:
+ with SMTP(host, port) as server:
+ server.starttls(context=context)
+ server.login(user=user, password=password)
+ server.send_message(msg)
+ logging.info(f"Successfully sent mail To: {to}, Cc: {cc}")
+ return True
+ except Exception as err:
+ logging.error(f"Error raised: {err}")
+ else:
+ logging.info("Missing mailing fields, please check did you pass all fields")
+ return False
diff --git a/cloudsensei/slack_operations.py b/cloudsensei/slack_operations.py
new file mode 100644
index 00000000..aedceee1
--- /dev/null
+++ b/cloudsensei/slack_operations.py
@@ -0,0 +1,66 @@
+import logging
+import os
+from datetime import datetime
+
+import requests
+
+
+class SlackOperations:
+ """
+ This class performs the Slack operations
+ """
+
+ SLACK_POST_API = 'https://slack.com/api/chat.postMessage' # API to post messages in slack
+
+ def __init__(self):
+ self.__slack_auth_token = os.environ['SLACK_API_TOKEN']
+ self.__channel_name = f'#{os.environ["SLACK_CHANNEL_NAME"]}' # before entering channel add your app to this channel
+ self.api_headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {self.__slack_auth_token}'
+ }
+
+ def post_message(self, blocks: list, thread_ts: str = None):
+ """
+ This method post block message in slack
+ :param thread_ts:
+ :param blocks:
+ :return:
+ """
+ json_data = {
+ 'channel': self.__channel_name,
+ 'blocks': blocks
+ }
+ if thread_ts:
+ json_data['thread_ts'] = thread_ts
+ response = requests.post(url=self.SLACK_POST_API, headers=self.api_headers, json=json_data)
+ response_data = response.json()
+ return response_data
+
+ def create_thread(self, account_name: str):
+ """
+ This method sends the header first to create a thread in slack
+ :return:
+ """
+ header = f":zap: Daily Report @ {datetime.utcnow().date()}: Account *{account_name.title()}* has following long running instances"
+ blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": header}}]
+ response_data = self.post_message(blocks=blocks)
+ if response_data:
+ if response_data.get('ok'):
+ logging.info(f"Successfully Created a Thread @timestamp={response_data.get('ts')}")
+ return response_data.get('ts')
+ return None
+
+ def post_message_blocks_in_thread(self, message_blocks: list, thread_ts: str):
+ """
+ This method post messages in thread
+ :param message_blocks:
+ :param thread_ts:
+ :return:
+ """
+ success_sends = 0
+ for index, block in enumerate(message_blocks):
+ response = self.post_message(blocks=block, thread_ts=thread_ts)
+ if response.get('ok'):
+ success_sends += 1
+ return f"Total blocks: {len(message_blocks)}, Total Successes blocks: {success_sends}"
diff --git a/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2 b/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2
new file mode 100644
index 00000000..66fb21b1
--- /dev/null
+++ b/cloudsensei/terraform/CloudSenseiLambdaPolicy.j2
@@ -0,0 +1,29 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstances",
+ "ec2:DescribeRegions",
+ "iam:ListAccountAliases"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": "logs:CreateLogGroup",
+ "Resource": "arn:aws:logs:{{AWS_DEFAULT_REGION}}:{{ACCOUNT_ID}}:*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": [
+ "arn:aws:logs:{{AWS_DEFAULT_REGION}}:{{ACCOUNT_ID}}:log-group:/aws/lambda/CloudSensei:*"
+ ]
+ }
+ ]
+}
diff --git a/cloudsensei/terraform/CloudSenseiLambdaRole.json b/cloudsensei/terraform/CloudSenseiLambdaRole.json
new file mode 100644
index 00000000..fd267525
--- /dev/null
+++ b/cloudsensei/terraform/CloudSenseiLambdaRole.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/cloudsensei/terraform/Template.py b/cloudsensei/terraform/Template.py
new file mode 100644
index 00000000..f99ced61
--- /dev/null
+++ b/cloudsensei/terraform/Template.py
@@ -0,0 +1,60 @@
+import os
+
+from jinja2 import Environment, FileSystemLoader, Template
+
+
+def inject_variables():
+ """
+    This method injects the variables into the jinja file and creates a json file
+ :return:
+ """
+ account_id = os.environ.get('ACCOUNT_ID', 1)
+ aws_region = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
+ s3_bucket_name = os.environ.get('S3_BUCKET')
+ if account_id:
+ with open('CloudSenseiLambdaPolicy.j2') as file:
+ template_loader = Template(file.read())
+ with open('./CloudSenseiLambdaPolicy.json', 'w') as write_file:
+ write_file.write(template_loader.render({'ACCOUNT_ID': account_id, 'AWS_DEFAULT_REGION': aws_region}))
+ else:
+ print("AccountId is missing")
+ if s3_bucket_name:
+ with open('backend.j2') as backend_file:
+ template_loader = Template(backend_file.read())
+ with open('./backend.tf', 'w') as backend_write_file:
+ backend_write_file.write(template_loader.render({'AWS_DEFAULT_REGION': aws_region,
+ 'S3_BUCKET_NAME': s3_bucket_name}))
+ resource_days = os.environ.get('RESOURCE_DAYS', '7')
+    slack_api_token = os.environ.get('SLACK_API_TOKEN', '')
+ slack_channel_name = os.environ.get('SLACK_CHANNEL_NAME', '')
+ ses_host_address = os.environ.get('SES_HOST_ADDRESS', '')
+ ses_host_port = int(os.environ.get('SES_HOST_PORT', '0'))
+ ses_user_id = os.environ.get('SES_USER_ID', '')
+ ses_password = os.environ.get('SES_PASSWORD', '')
+ to_addresses = os.environ.get('TO_ADDRESS', '')
+ cc_addresses = os.environ.get('CC_ADDRESS', '')
+ send_agg_mail = os.environ.get('SEND_AGG_MAIL')
+ es_server = os.environ.get('ES_SERVER')
+ context = f'AWS_DEFAULT_REGION="{aws_region}"'
+ if resource_days:
+ context = f'RESOURCE_DAYS="{resource_days}"'
+ if slack_api_token and slack_channel_name:
+ context += f'\nSLACK_API_TOKEN="{slack_api_token}"\nSLACK_CHANNEL_NAME="{slack_channel_name}"'
+    if ses_host_address and ses_host_port and ses_user_id and ses_password:
+ context += f'\nSES_HOST_ADDRESS="{ses_host_address}"' \
+ f'\nSES_HOST_PORT="{ses_host_port}"' \
+ f'\nSES_USER_ID="{ses_user_id}"' \
+ f'\nSES_PASSWORD="{ses_password}"'
+ if to_addresses:
+ context += f'\nTO_ADDRESS="{to_addresses}"'
+ if cc_addresses:
+ context += f'\nCC_ADDRESS="{cc_addresses}"'
+ if send_agg_mail:
+ context += f'\nSEND_AGG_MAIL="{send_agg_mail}"'
+ if es_server:
+ context += f'\nES_SERVER="{es_server}"'
+ with open('./input_vars.tfvars', 'w') as write_tf_vars:
+ write_tf_vars.write(context)
+
+
+inject_variables()
diff --git a/cloudsensei/terraform/backend.j2 b/cloudsensei/terraform/backend.j2
new file mode 100644
index 00000000..b6859603
--- /dev/null
+++ b/cloudsensei/terraform/backend.j2
@@ -0,0 +1,7 @@
+terraform {
+ backend "s3" {
+ bucket = "{{S3_BUCKET_NAME}}"
+ key = "terraform.tfstate"
+ region = "us-east-2"
+ }
+}
diff --git a/cloudsensei/terraform/event_bridge.tf b/cloudsensei/terraform/event_bridge.tf
new file mode 100644
index 00000000..ead4d02d
--- /dev/null
+++ b/cloudsensei/terraform/event_bridge.tf
@@ -0,0 +1,61 @@
+resource "aws_scheduler_schedule_group" "cloud_sensei_group" {
+ name = "CloudSenseiGroup"
+ tags = {
+ User = "cloudsensei"
+ }
+}
+
+
+resource "aws_scheduler_schedule" "cloud_sensei_event_bridge_scheduler" {
+ name = "CloudSenseiScheduler"
+ group_name = aws_scheduler_schedule_group.cloud_sensei_group.name
+
+ flexible_time_window {
+ mode = "OFF"
+ }
+
+ schedule_expression = "cron(30 16 * * ? *)"
+ schedule_expression_timezone = "Asia/Kolkata"
+ target {
+ arn = module.lambda_function_existing_package_local.lambda_function_arn
+ role_arn = aws_iam_role.event_bridge_role.arn
+ }
+
+}
+
+resource "aws_iam_role" "event_bridge_role" {
+
+ assume_role_policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Action = "sts:AssumeRole"
+ Effect = "Allow"
+ Sid = ""
+ Principal = {
+ Service = "scheduler.amazonaws.com"
+ },
+ },
+ ]
+ })
+ inline_policy {
+ name = "CloudSenseiEventBridgeExecutionPolicy"
+
+ policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Action = ["lambda:InvokeFunction"]
+ Effect = "Allow"
+ Resource = [module.lambda_function_existing_package_local.lambda_function_arn,
+ "${module.lambda_function_existing_package_local.lambda_function_arn}/*"]
+ },
+ ]
+ })
+ }
+
+ tags = {
+ User = "cloudsensei"
+ }
+  name = "CloudSenseiEventBridgeRole"
+}
diff --git a/cloudsensei/terraform/lambda.tf b/cloudsensei/terraform/lambda.tf
new file mode 100644
index 00000000..76f363e5
--- /dev/null
+++ b/cloudsensei/terraform/lambda.tf
@@ -0,0 +1,46 @@
+module "lambda_function_existing_package_local" {
+ source = "terraform-aws-modules/lambda/aws"
+ lambda_role = aws_iam_role.cloud_sensei_iam_role.arn
+ function_name = "CloudSensei"
+ description = "Daily reporting on Cloud Usage"
+ memory_size = 256
+ package_type = "Zip"
+ tags = {
+ User = "cloudsensei"
+ }
+ timeout = 300
+ environment_variables = {
+ SLACK_API_TOKEN = var.SLACK_API_TOKEN
+ SLACK_CHANNEL_NAME = var.SLACK_CHANNEL_NAME
+ RESOURCE_DAYS = var.RESOURCE_DAYS
+ SES_HOST_ADDRESS = var.SES_HOST_ADDRESS
+ SES_HOST_PORT = var.SES_HOST_PORT
+ SES_USER_ID = var.SES_USER_ID
+ SES_PASSWORD = var.SES_PASSWORD
+ TO_ADDRESS = var.TO_ADDRESS
+ CC_ADDRESS = var.CC_ADDRESS
+ ES_SERVER = var.ES_SERVER
+ SEND_AGG_MAIL = var.SEND_AGG_MAIL
+ }
+ runtime = "python3.9"
+ local_existing_package = "./../CloudSensei.zip"
+ handler = "lambda_function.lambda_handler"
+ create_package = false
+ create_role = false
+}
+
+# Create Lambda Role Execution policy, with specified resource permissions
+resource "aws_iam_role" "cloud_sensei_iam_role" {
+
+ name = "CloudSenseiLambdaRole"
+
+ assume_role_policy = file("./CloudSenseiLambdaRole.json")
+ inline_policy {
+ name = "CloudSenseiLambdaPolicy"
+ policy = file("./CloudSenseiLambdaPolicy.json")
+ }
+ tags = {
+ User = "cloudsensei"
+ }
+
+}
diff --git a/cloudsensei/terraform/main.tf b/cloudsensei/terraform/main.tf
new file mode 100644
index 00000000..45fb3f58
--- /dev/null
+++ b/cloudsensei/terraform/main.tf
@@ -0,0 +1,3 @@
+provider "aws" {
+ region = var.AWS_DEFAULT_REGION
+}
diff --git a/cloudsensei/terraform/variables.tf b/cloudsensei/terraform/variables.tf
new file mode 100644
index 00000000..72a592ec
--- /dev/null
+++ b/cloudsensei/terraform/variables.tf
@@ -0,0 +1,61 @@
+variable "SLACK_API_TOKEN" {
+ type = string
+ description = "Slack OAuth Token"
+ default = null
+}
+
+variable "SLACK_CHANNEL_NAME" {
+ type = string
+ description = "Slack Channel id/name"
+ default = null
+}
+
+variable "AWS_DEFAULT_REGION" {
+ default = "us-east-1"
+}
+
+variable "ACCOUNT_ID" {
+ default = null
+}
+
+variable "RESOURCE_DAYS" {
+ type = number
+ default = 7
+}
+
+variable "SES_HOST_ADDRESS" {
+ type = string
+ default = null
+}
+
+variable "SES_HOST_PORT" {
+ type = number
+ default = null
+}
+variable "SES_USER_ID" {
+ type = string
+ default = null
+}
+variable "SES_PASSWORD" {
+ type = string
+ default = null
+}
+
+variable "TO_ADDRESS" {
+ type = string
+ default = null
+}
+variable "CC_ADDRESS" {
+ type = string
+ default = null
+}
+
+variable "SEND_AGG_MAIL" {
+ type = string
+ default = "no"
+}
+
+variable "ES_SERVER" {
+ type = string
+ default = null
+}
diff --git a/docs/source/index.md b/docs/source/index.md
index 09906c4e..57f9beba 100644
--- a/docs/source/index.md
+++ b/docs/source/index.md
@@ -23,7 +23,7 @@ This tool support the following policies:
* [s3_inactive](../../cloud_governance/policy/aws/s3_inactive.py): Get the inactive/empty buckets and delete them after 7 days.
* [empty_roles](../../cloud_governance/policy/aws/empty_roles.py): Get empty roles and delete it after 7 days.
* [zombie_snapshots](../../cloud_governance/policy/aws/zombie_snapshots.py): Get the zombie snapshots and delete it after 7 days.
-* [nat_gateway_unused](../../cloud_governance/policy/aws/nat_gateway_unused.py): Get the unused nat gateways and deletes it after 7 days.
+* [nat_gateway_unused](../../cloud_governance/policy/aws/unused_nat_gateway.py): Get the unused nat gateways and deletes it after 7 days.
* gitleaks: scan Github repository git leak (security scan)
* [cost_over_usage](../../cloud_governance/policy/aws/cost_over_usage.py): send mail to aws user if over usage cost
diff --git a/grafana/clouds/aws/cost_explorer_main.json b/grafana/clouds/aws/cost_explorer_main.json
new file mode 100644
index 00000000..4b72fc23
--- /dev/null
+++ b/grafana/clouds/aws/cost_explorer_main.json
@@ -0,0 +1,4673 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 124,
+ "iteration": 1683606984477,
+ "links": [
+ {
+ "asDropdown": false,
+ "icon": "dashboard",
+ "includeVars": false,
+ "keepTime": false,
+ "tags": [],
+ "targetBlank": true,
+ "title": "Clouds Payer Dashboards",
+ "tooltip": "",
+ "type": "link",
+ "url": "http://grafana.intlab.perf-infra.lab.eng.rdu2.redhat.com/d/ckeZn1o4k/payer-account-billing-reports?orgId=7&var-CloudName=All&var-Owner=Shai&var-CostCenter=All&var-Account=All&var-AccountId=All"
+ },
+ {
+ "asDropdown": false,
+ "icon": "dashboard",
+ "includeVars": false,
+ "keepTime": false,
+ "tags": [],
+ "targetBlank": true,
+ "title": "IBM Dashboards",
+ "tooltip": "",
+ "type": "link",
+ "url": "http://grafana.intlab.perf-infra.lab.eng.rdu2.redhat.com/d/dvtz2vHVz/ibm-monthly-invoice-dashboard?orgId=7"
+ },
+ {
+ "asDropdown": false,
+ "icon": "external link",
+ "includeVars": false,
+ "keepTime": false,
+ "tags": [],
+ "targetBlank": true,
+ "title": "GSheet Links",
+ "tooltip": "",
+ "type": "link",
+ "url": "https://docs.google.com/spreadsheets/d/1EHFGVgjMc9Usl-0QFSKl5UmtMeqAiodQDlBSL9qx2XY/edit#gid=0"
+ }
+ ],
+ "liveNow": false,
+ "panels": [
+ {
+ "description": "",
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 7,
+ "y": 0
+ },
+ "id": 2,
+ "libraryPanel": {
+ "description": "",
+ "meta": {
+ "connectedDashboards": 5,
+ "created": "2022-06-17T12:14:14Z",
+ "createdBy": {
+ "avatarUrl": "/avatar/094e42d44756239ce2006664467f047b",
+ "id": 86,
+ "name": "athiruma"
+ },
+ "folderName": "General",
+ "folderUid": "",
+ "updated": "2022-11-23T06:58:09Z",
+ "updatedBy": {
+ "avatarUrl": "/avatar/4925d4e6629bfce243ae0033f77c3aa2",
+ "id": 85,
+ "name": "ebattat"
+ }
+ },
+ "name": "Cloud Governance Nightly Report",
+ "type": "text",
+ "uid": "E56aJXj7z",
+ "version": 5
+ },
+ "options": {
+ "content": "\n![Cloud Governance](https://github.com/redhat-performance/cloud-governance/blob/main/images/cloud_governance.png?raw=true \"Tooltip Text\")\n",
+ "mode": "markdown"
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "timestamp",
+ "id": "2",
+ "settings": {
+ "interval": "auto"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "I9yrJ19nz"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "type": "count"
+ }
+ ],
+ "query": "",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cloud Governance Nightly Cost Report",
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 3,
+ "x": 21,
+ "y": 0
+ },
+ "id": 121,
+ "options": {
+ "content": "![AWS](https://pbs.twimg.com/profile_images/1402754057245138947/Yz4xMoJC_400x400.jpg \"Tooltip Text\")\n",
+ "mode": "markdown"
+ },
+ "pluginVersion": "8.5.14",
+ "type": "text"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 28,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "7VSc2zpVz"
+ },
+ "description": "Show last 2 days for showing final cost",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 26,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Budget.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "7VSc2zpVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Account: $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {}
+ }
+ ],
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "7VSc2zpVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": false,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 9,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 6,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 36,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.3.3",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Budget.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "7VSc2zpVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "type": "timeseries"
+ }
+ ],
+ "title": "Cost Per Account",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 128,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 10
+ },
+ "id": 130,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "vertical",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 200
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "ChargeType.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account Charge Type",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": true,
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 131,
+ "options": {
+ "displayLabels": [
+ "value"
+ ],
+ "legend": {
+ "displayMode": "list",
+ "placement": "right",
+ "values": [
+ "value"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "/^Sum$/",
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "ChargeType.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account Charge Type",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": true,
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 24,
+ "x": 0,
+ "y": 32
+ },
+ "id": 132,
+ "options": {
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "ChargeType.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "LdTmDL2Vk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account Charge Type",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": true,
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "timeseries"
+ }
+ ],
+ "title": "Account Charge Type",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 10
+ },
+ "id": 137,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X26Kz1xVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 11
+ },
+ "id": 134,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "PurchaseType.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X26Kz1xVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget.keyword= $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account Purchase Types",
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X26Kz1xVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 25,
+ "gradientMode": "opacity",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 135,
+ "options": {
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "PurchaseType.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X26Kz1xVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget.keyword= $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account Purchase Types",
+ "type": "timeseries"
+ }
+ ],
+ "title": "PurchaseTypes",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 11
+ },
+ "id": 12,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "yellow",
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 31,
+ "options": {
+ "displayLabels": [
+ "name"
+ ],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "values": [
+ "percent",
+ "value"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "limit": 100,
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Project.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget: $Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Project: $Account",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 24,
+ "x": 0,
+ "y": 42
+ },
+ "id": 14,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": -45,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.3.3",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Project.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "transformations": [],
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": false,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 9,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 6,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 54
+ },
+ "id": 39,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.3.3",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Project.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "UNThhktVk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Cost Per Project",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "id": 18,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 13
+ },
+ "id": 29,
+ "options": {
+ "displayLabels": [
+ "name"
+ ],
+ "legend": {
+ "displayMode": "list",
+ "placement": "right",
+ "values": [
+ "percent"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Manager.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Manager: $Account",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 20,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": -45,
+ "xTickLabelSpacing": 0
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Manager.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget=$Account ",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": false,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 9,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 6,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 35
+ },
+ "id": 41,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.3.3",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Manager.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "GqAphkp4z"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Cost Per Manager",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 13
+ },
+ "id": 6,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "orange",
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "min": 1,
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "id": 30,
+ "options": {
+ "displayLabels": [
+ "name"
+ ],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "values": [
+ "percent",
+ "value"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "limit": 100,
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "User.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Users $Account",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisGridShow": true,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 8,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": -45,
+ "xTickLabelSpacing": 0
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "User.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": false,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 9,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 6,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 37
+ },
+ "id": 46,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.3.3",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "User.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "missing": "User.keyword",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "timestamp",
+ "id": "3",
+ "settings": {
+ "interval": "auto",
+ "min_doc_count": "0",
+ "timeZone": "utc",
+ "trimEdges": "0"
+ },
+ "type": "date_histogram"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "XdKgWRtVz"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "max"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "$Account",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Cost Per Users",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "id": 113,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "PQlAtADVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 17,
+ "x": 0,
+ "y": 15
+ },
+ "id": 115,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "PQlAtADVz"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "settings": {
+ "size": "500"
+ },
+ "type": "raw_data"
+ }
+ ],
+ "query": "!\"notify_admin\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Monthly report",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": false,
+ "field": "Account.keyword"
+ }
+ ]
+ }
+ },
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "Account",
+ "MessageType",
+ "Policy",
+ "To",
+ "timestamp"
+ ]
+ }
+ }
+ },
+ {
+ "id": "convertFieldType",
+ "options": {
+ "conversions": [
+ {
+ "destinationType": "string",
+ "targetField": "MessageType"
+ }
+ ],
+ "fields": {}
+ }
+ },
+ {
+ "id": "filterByValue",
+ "options": {
+ "filters": [
+ {
+ "config": {
+ "id": "equal",
+ "options": {
+ "value": "null"
+ }
+ },
+ "fieldName": "MessageType"
+ },
+ {
+ "config": {
+ "id": "equal",
+ "options": {
+ "value": "undefined"
+ }
+ },
+ "fieldName": "MessageType"
+ }
+ ],
+ "match": "any",
+ "type": "exclude"
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {},
+ "renameByName": {
+ "Account": "",
+ "MessageType": "",
+ "To": "User",
+ "timestamp": "Alert Date"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "PQlAtADVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 17,
+ "x": 0,
+ "y": 24
+ },
+ "id": 138,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Policy.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "MessageType.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "PQlAtADVz"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "type": "count"
+ }
+ ],
+ "query": "",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Monthly report",
+ "transformations": [],
+ "type": "table"
+ }
+ ],
+ "title": "Mail Alerts",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 90,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 96,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "settings": {
+ "size": "500"
+ },
+ "type": "raw_data"
+ }
+ ],
+ "query": "Account=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Skip resources in $Account",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "Region",
+ "ResourceId",
+ "ResourceName",
+ "User"
+ ]
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": false,
+ "field": "User"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 7,
+ "x": 0,
+ "y": 24
+ },
+ "id": 94,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": false
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Account.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Account=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost of skip resources in $Account",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 7,
+ "x": 7,
+ "y": 24
+ },
+ "id": 98,
+ "options": {
+ "displayLabels": [
+ "name",
+ "value"
+ ],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "values": []
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "/^Count$/",
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "User.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "type": "count"
+ }
+ ],
+ "query": "Account=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Skip resources by User in $Account",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 14,
+ "y": 24
+ },
+ "id": 92,
+ "options": {
+ "displayLabels": [
+ "name",
+ "value"
+ ],
+ "legend": {
+ "displayMode": "list",
+ "placement": "right"
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "/^Count$/",
+ "values": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ }
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "ResourceName.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "KwQ6eOD4z"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "type": "count"
+ }
+ ],
+ "query": "Account.keyword=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Skip policy Resources in $Account",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": true,
+ "field": "Cost"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "piechart"
+ }
+ ],
+ "title": "Skip Policy Resources",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 61,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 45
+ },
+ "id": 57,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "last"
+ ],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "region.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "policy.keyword:\"ec2_stop\" AND account.keyword:$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "EC2_Stop: $Account",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "region.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 273
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 53
+ },
+ "id": 59,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "ec2_stop.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "region.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "policy.keyword:\"ec2_stop\" AND account.keyword:$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "EC2_Stop: $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Sum": true
+ },
+ "indexByName": {
+ "Sum": 2,
+ "ec2_stop.keyword": 1,
+ "region.keyword": 0
+ },
+ "renameByName": {
+ "ec2_stop.keyword": "InstanceId | Name | User | LaunchTime | Policy ( Not_Delete )"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "region.keyword"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "Policy: EC2 Stop >= 30 days",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 48,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 50,
+ "interval": "1d",
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": false
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "region.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "hide": false,
+ "metrics": [
+ {
+ "field": "resources",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "policy.keyword:\"ec2_idle\" AND account.keyword:$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "EC2 Idle: $Account",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "resources_list.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 1056
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "region.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 134
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "timestamp"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 131
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 51,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "resources_list.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "region.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "VRMNYTR4k"
+ },
+ "hide": false,
+ "metrics": [
+ {
+ "field": "resources",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "account.keyword:$Account AND policy.keyword:\"ec2_idle\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "EC2 Idle: $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Sum": true
+ },
+ "indexByName": {
+ "Sum": 2,
+ "region.keyword": 0,
+ "resources_list.keyword": 1
+ },
+ "renameByName": {
+ "region.keyword": "Region",
+ "resources_list.keyword": "instance id | user | cost($) | state | instance type | launch time | name | cluster owned"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "Policy: EC2-Idle >= 2 days",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 82,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "2B-r4LTVz"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 88,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [],
+ "displayMode": "hidden",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "instance_type.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "2B-r4LTVz"
+ },
+ "metrics": [
+ {
+ "field": "instance_count",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "account.keyword: $Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "InstancesTypes: $Account",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": true,
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "barchart"
+ }
+ ],
+ "title": "InstanceTypes",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 78,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X3gKVsk4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "BucketName | CreateDate | Age | Policy"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 603
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Account"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 103
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Count"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 181
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 65,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "empty_buckets.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "account.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X3gKVsk4z"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "min"
+ }
+ ],
+ "query": "account.keyword:$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Empty Buckets: $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Count": true
+ },
+ "indexByName": {},
+ "renameByName": {
+ "account.keyword": "Account",
+ "empty_buckets.keyword": "BucketName | CreateDate | Age | Policy"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Account"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "Policy: Empty Bucket",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 69,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 73,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "zombie_snapshots.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "account.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "region.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "account.keyword=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Zombie Snapshots",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Max": true
+ },
+ "indexByName": {},
+ "renameByName": {
+ "account.keyword": "Account",
+ "region.keyword": "Region",
+ "zombie_snapshots.keyword": "SnapshotId| Name | User | VolumeSize | Policy"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Account"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "NatGatewayId | user | VpcId | Policy"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 585
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 71,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "zombie_nat_gateways.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "region.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "account.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "10"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "account.keyword=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Zombie NatGateways: $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Max": true
+ },
+ "indexByName": {},
+ "renameByName": {
+ "account.keyword": "Account",
+ "region.keyword": "Region",
+ "zombie_nat_gateways.keyword": "NatGatewayId | user | VpcId | Policy"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Account"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "zombie_elastic_ips.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 927
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "account.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 207
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "AllocationId | Name | PublicIp | Policy"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 567
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 32
+ },
+ "id": 67,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "zombie_elastic_ips.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "account.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "region.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "ZL9Ev7k4k"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "account.keyword=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Zombie ElasticIps",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "account.keyword"
+ }
+ ]
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Max": true
+ },
+ "indexByName": {},
+ "renameByName": {
+ "account.keyword": "Account",
+ "region.keyword": "Region",
+ "zombie_elastic_ips.keyword": "AllocationId | Name | PublicIp | Policy"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X3gKVsk4z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 43
+ },
+ "id": 63,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.9",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "empty_roles.keyword",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "account.keyword",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "X3gKVsk4z"
+ },
+ "metrics": [
+ {
+ "field": "count",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "account.keyword=$Account",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Empty Roles",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Max": true
+ },
+ "indexByName": {},
+ "renameByName": {
+ "account.keyword": "Account",
+ "empty_roles.keyword": "RoleName | Policy"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Account"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "Policy: Zombie & Empty Resources",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
+ "id": 24,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "D1FohzpVk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 135
+ },
+ "id": 22,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": true
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Email.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "D1FohzpVk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget=$Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Email: $Account",
+ "type": "table"
+ }
+ ],
+ "title": "Email",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 34,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "eszb2ktVk"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 100
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 35,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Name.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "1",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "eszb2ktVk"
+ },
+ "metrics": [
+ {
+ "field": "Cost",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "Budget.keyword: $Account AND !NoTagKey AND !\"PERF-DEPT-REFUND\" AND !\"PERF-SCALE-REFUND\" AND !\"PSAP-REFUND\"",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Cost Per Resource Names: $Account",
+ "type": "table"
+ }
+ ],
+ "title": "Resource Names",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 45,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "gSgjheqnz"
+ },
+ "description": "Spreadsheet tags",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "center",
+ "displayMode": "color-text",
+ "filterable": false,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "MissingTags"
+ },
+ "properties": [
+ {
+ "id": "custom.width",
+ "value": 552
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 43,
+ "links": [
+ {
+ "title": "User tags",
+ "url": "https://docs.google.com/spreadsheets/d/1KEFd1e1z03c9Ai7LyX7IoBtLGTiKhSKEzLMuwUTAQUY/edit#gid=0"
+ }
+ ],
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "allValues"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "gSgjheqnz"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "settings": {
+ "size": "500"
+ },
+ "type": "raw_data"
+ }
+ ],
+ "query": "",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Perf-Dept: missing tags ",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "_id": true,
+ "_index": true,
+ "_type": true,
+ "highlight": true,
+ "sort": true,
+ "timestamp": true
+ },
+ "indexByName": {},
+ "renameByName": {}
+ }
+ },
+ {
+ "id": "groupBy",
+ "options": {
+ "fields": {
+ "MissingTags": {
+ "aggregations": [
+ "last"
+ ],
+ "operation": "aggregate"
+ },
+ "User": {
+ "aggregations": [],
+ "operation": "groupby"
+ }
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "cn0DL467z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 53,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "cn0DL467z"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "settings": {
+ "size": "500"
+ },
+ "type": "raw_data"
+ }
+ ],
+ "query": "",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "PSAP: missing tags ",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "MissingTags",
+ "User"
+ ]
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {
+ "MissingTags": 1,
+ "User": 0
+ },
+ "renameByName": {}
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "pStF2V67k"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "color-text",
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 33
+ },
+ "id": 55,
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "pStF2V67k"
+ },
+ "metrics": [
+ {
+ "id": "1",
+ "settings": {
+ "size": "500"
+ },
+ "type": "raw_data"
+ }
+ ],
+ "query": "",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Perf-Scale: missing tags ",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "MissingTags",
+ "User"
+ ]
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {
+ "MissingTags": 1,
+ "User": 0
+ },
+ "renameByName": {}
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "User: missing tags",
+ "type": "row"
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 36,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "allValue": "PERF-DEPT, PSAP, PERFSCALE",
+ "current": {
+ "selected": true,
+ "text": [
+ "PERF-DEPT"
+ ],
+ "value": [
+ "PERF-DEPT"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "7VSc2zpVz"
+ },
+ "definition": "{\"find\":\"terms\",\"field\":\"Budget.keyword\"}",
+ "description": "Account",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Account",
+ "multi": true,
+ "name": "Account",
+ "options": [],
+ "query": "{\"find\":\"terms\",\"field\":\"Budget.keyword\"}",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-7d",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "utc",
+ "title": "Cost Explorer-Main",
+ "uid": "3vqJes5Vk",
+ "version": 60,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/grafana/clouds/aws/payer_account_billing_reports.json b/grafana/clouds/aws/payer_account_billing_reports.json
new file mode 100644
index 00000000..2e121d39
--- /dev/null
+++ b/grafana/clouds/aws/payer_account_billing_reports.json
@@ -0,0 +1,1228 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Forecasting for the Perf-Dept, Openshift-PerfScale, Openshift-PSAP",
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 130,
+ "iteration": 1683607032287,
+ "links": [],
+ "liveNow": true,
+ "panels": [
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "noValue": "0",
+ "unit": "currencyUSD"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Balance"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#b157ff",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "id": 38,
+ "options": {
+ "displayLabels": [
+ "value"
+ ],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "values": [
+ "value"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "sum"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Account.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "Actual",
+ "id": "3",
+ "settings": {
+ "min_doc_count": "1",
+ "missing": "0",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "SavingsPlanCost",
+ "id": "4",
+ "settings": {
+ "min_doc_count": "1",
+ "missing": "0",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ },
+ {
+ "field": "PremiumSupportFee",
+ "id": "5",
+ "settings": {
+ "min_doc_count": "1",
+ "missing": "0",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "Budget",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName AND filter_date.keyword: $Month)",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Total $CloudName/ $Month",
+ "transformations": [
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "Balance",
+ "binary": {
+ "left": "Sum",
+ "operator": "-",
+ "reducer": "sum",
+ "right": "Actual"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": false
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Account.keyword": false,
+ "Actual": true,
+ "CurrentCost": true,
+ "PremiumSupportFee": false,
+ "Sum": false
+ },
+ "indexByName": {},
+ "renameByName": {
+ "Balance": "",
+ "Remaining Cost": "",
+ "Sum": "Budget"
+ }
+ }
+ },
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "Actual",
+ "binary": {
+ "left": "Budget",
+ "operator": "-",
+ "reducer": "sum",
+ "right": "Balance"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": false
+ }
+ },
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "Total",
+ "mode": "reduceRow",
+ "reduce": {
+ "include": [
+ "SavingsPlanCost",
+ "PremiumSupportFee",
+ "Actual"
+ ],
+ "reducer": "sum"
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Actual": true,
+ "PremiumSupportFee": true,
+ "SavingsPlanCost": true
+ },
+ "indexByName": {},
+ "renameByName": {}
+ }
+ }
+ ],
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "currencyUSD"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "id": 40,
+ "options": {
+ "displayLabels": [
+ "value"
+ ],
+ "legend": {
+ "displayMode": "list",
+ "placement": "right",
+ "values": [
+ "value"
+ ]
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "CloudName.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "missing": "0",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "Actual",
+ "id": "1",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "SavingsPlanCost",
+ "id": "3",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "PremiumSupportFee",
+ "id": "4",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ }
+ ],
+ "query": "CloudName.keyword: $CloudName AND Owner.keyword: $Owner AND CostCenter: $CostCenter AND Account.keyword: $Account AND filter_date.keyword: $Month",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Actual $CloudName: Cost / $Month",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {},
+ "renameByName": {
+ "CloudName.keyword": "CloudName",
+ "Sum Actual": "Actual",
+ "Sum PremiumSupportFee": "Support",
+ "Sum SavingsPlanCost": "Savings"
+ }
+ }
+ },
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "Total",
+ "mode": "reduceRow",
+ "reduce": {
+ "include": [
+ "Actual",
+ "Savings",
+ "Support"
+ ],
+ "reducer": "sum"
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Actual": true,
+ "Savings": true,
+ "Support": true
+ },
+ "indexByName": {},
+ "renameByName": {}
+ }
+ }
+ ],
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Month.keyword"
+ },
+ "properties": [
+ {
+ "id": "custom.axisWidth",
+ "value": 2
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 24,
+ "x": 0,
+ "y": 11
+ },
+ "id": 41,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "auto",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelMaxLength": 0,
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "filter_date.keyword",
+ "id": "11",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "Actual",
+ "id": "1",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "Budget",
+ "id": "8",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "Forecast",
+ "id": "9",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "PremiumSupportFee",
+ "id": "12",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ },
+ {
+ "field": "SavingsPlanCost",
+ "id": "13",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ }
+ ],
+ "query": "Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName AND filter_date.keyword: $Month",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": " $Account Forecast, Budget, Actual / $Month",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Budget": false,
+ "TotalValues": true
+ },
+ "indexByName": {
+ "Account.keyword": 0,
+ "Budget": 3,
+ "ForecastCost": 2,
+ "Month.keyword": 1,
+ "Sum": 4
+ },
+ "renameByName": {
+ "Account.keyword": "Account",
+ "Budget": "",
+ "ForecastCost": "Estimated Cost",
+ "Month.keyword": "Month",
+ "Sum": "CurrentCost",
+ "Sum Actual": "Usage",
+ "Sum Budget": "Budget",
+ "Sum CurrentCost": "Actual",
+ "Sum Forecast": "Forecast",
+ "Sum ForecastCost": "Forecast",
+ "Sum PremiumSupportFee": "PremiumSupportFee",
+ "Sum SavingsPlanCost": "SavingsPlanCost",
+ "filter_date.keyword": "Month"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "desc": false,
+ "field": "Month"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": true,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Sum"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 29,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "vertical",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Account.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "Actual",
+ "id": "1",
+ "settings": {
+ "missing": "0"
+ },
+ "type": "sum"
+ }
+ ],
+ "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName AND filter_date.keyword: $Month)",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Current Usage / $Account - Till Now \\ $Month",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "transparent": true,
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 34
+ },
+ "id": 35,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "vertical",
+ "showValue": "always",
+ "stacking": "none",
+ "text": {},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Account.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "AllocatedBudget",
+ "id": "1",
+ "type": "max"
+ }
+ ],
+ "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName)",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "CY23 Budget / $Account",
+ "transformations": [
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Max"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "barchart"
+ },
+ {
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "description": "IBM has only 1 current Month Forecast",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisGridShow": true,
+ "axisLabel": "",
+ "axisPlacement": "left",
+ "axisSoftMin": 0,
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1,
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ },
+ "mappings": [],
+ "noValue": "0",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "currencyUSD"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Sum"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 42
+ },
+ "id": 32,
+ "options": {
+ "barRadius": 0,
+ "barWidth": 0.97,
+ "groupWidth": 0.7,
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "right"
+ },
+ "orientation": "vertical",
+ "showValue": "always",
+ "stacking": "none",
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ },
+ "xTickLabelRotation": 0,
+ "xTickLabelSpacing": 0
+ },
+ "pluginVersion": "8.5.14",
+ "targets": [
+ {
+ "alias": "",
+ "bucketAggs": [
+ {
+ "field": "Account.keyword",
+ "id": "2",
+ "settings": {
+ "min_doc_count": "1",
+ "order": "desc",
+ "orderBy": "_term",
+ "size": "0"
+ },
+ "type": "terms"
+ }
+ ],
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "metrics": [
+ {
+ "field": "Forecast",
+ "id": "1",
+ "type": "sum"
+ }
+ ],
+ "query": "(Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName.keyword: $CloudName)",
+ "refId": "A",
+ "timeField": "timestamp"
+ }
+ ],
+ "title": "Forecasted next 12 M / $Account",
+ "transformations": [
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {},
+ "renameByName": {
+ "Account.keyword": "Account",
+ "Sum": "Sum"
+ }
+ }
+ },
+ {
+ "id": "sortBy",
+ "options": {
+ "fields": {},
+ "sort": [
+ {
+ "field": "Sum"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "barchart"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 36,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": [
+ "Shai"
+ ],
+ "value": [
+ "Shai"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"Owner.keyword\"}",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Owner",
+ "multi": true,
+ "name": "Owner",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"Owner.keyword\"}",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"CostCenter\", \"query\": \"Owner.keyword: $Owner\"}",
+ "description": "CostCategory",
+ "hide": 0,
+ "includeAll": true,
+ "label": "CostCenter",
+ "multi": true,
+ "name": "CostCenter",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"CostCenter\", \"query\": \"Owner.keyword: $Owner\"}",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"CloudName.keyword\", \"query\": \"CostCenter: $CostCenter AND Owner.keyword: $Owner\"}",
+ "description": "CloudName",
+ "hide": 0,
+ "includeAll": true,
+ "label": "CloudName",
+ "multi": true,
+ "name": "CloudName",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"CloudName.keyword\", \"query\": \"CostCenter: $CostCenter AND Owner.keyword: $Owner\"}",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "allValue": "",
+ "current": {
+ "selected": false,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"Account.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND !(AccountId.keyword=\\\"\\\") \" }",
+ "description": "Account",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Account",
+ "multi": true,
+ "name": "Account",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"Account.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND !(AccountId.keyword=\\\"\\\") \" }",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "allValue": "",
+ "current": {
+ "selected": false,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"AccountId.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND Account.keyword: $Account\" }",
+ "description": "AccountId",
+ "hide": 0,
+ "includeAll": true,
+ "label": "AccountId",
+ "multi": true,
+ "name": "AccountId",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"AccountId.keyword\", \"query\": \"CloudName.keyword: $CloudName AND CostCenter: $CostCenter AND Owner.keyword: $Owner AND Account.keyword: $Account\" }",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "allValue": "",
+ "current": {
+ "selected": true,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "elasticsearch",
+ "uid": "NvnUAH04z"
+ },
+ "definition": "{\"find\":\"terms\", \"field\":\"filter_date.keyword\", \"query\": \"Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName\"}",
+ "description": "Month",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Month",
+ "multi": true,
+ "name": "Month",
+ "options": [],
+ "query": "{\"find\":\"terms\", \"field\":\"filter_date.keyword\", \"query\": \"Account.keyword: $Account AND CostCenter: $CostCenter AND CloudName: $CloudName\"}",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now/y",
+ "to": "now/y"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ]
+ },
+ "timezone": "utc",
+ "title": "Payer Account Billing Reports",
+ "uid": "ckeZn1o4k",
+ "version": 59,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/iam/clouds/aws/CloudGovernanceDeletePolicy.json b/iam/clouds/aws/CloudGovernanceDeletePolicy.json
new file mode 100644
index 00000000..7bca5146
--- /dev/null
+++ b/iam/clouds/aws/CloudGovernanceDeletePolicy.json
@@ -0,0 +1,169 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "CostExplorer",
+ "Effect": "Allow",
+ "Action": [
+ "ce:GetCostAndUsage",
+ "ce:GetCostForecast"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "EC2AccountLevel",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DeleteTags",
+ "ec2:CreateTags"
+ ],
+ "Resource": [
+ "arn:aws:ec2:*:account_id:instance/*",
+ "arn:aws:ec2:*:account_id:route-table/*",
+ "arn:aws:ec2:*:account_id:network-interface/*",
+ "arn:aws:ec2:*:account_id:internet-gateway/*",
+ "arn:aws:ec2:*:account_id:dhcp-options/*",
+ "arn:aws:ec2:*::snapshot/*",
+ "arn:aws:ec2:*:account_id:vpc/*",
+ "arn:aws:ec2:*:account_id:elastic-ip/*",
+ "arn:aws:ec2:*:account_id:network-acl/*",
+ "arn:aws:ec2:*:account_id:natgateway/*",
+ "arn:aws:ec2:*:account_id:security-group/*",
+ "arn:aws:ec2:*:account_id:vpc-endpoint/*",
+ "arn:aws:ec2:*:account_id:subnet/*",
+ "arn:aws:ec2:*:account_id:volume/*",
+ "arn:aws:ec2:*::image/*"
+ ]
+ },
+ {
+ "Sid": "EC2ResourceLevel",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DeregisterImage",
+ "ec2:DeleteSubnet",
+ "ec2:DeleteSnapshot",
+ "ec2:DescribeAddresses",
+ "ec2:DescribeInstances",
+ "ec2:DeleteVpcEndpoints",
+ "ec2:DeleteVpcPeeringConnection",
+ "autoscaling:DescribeLaunchConfigurations",
+ "ec2:DescribeRegions",
+ "ec2:CreateImage",
+ "ec2:CreateVpc",
+ "ec2:DescribeDhcpOptions",
+ "ec2:DescribeSnapshots",
+ "ec2:DeleteRouteTable",
+ "ec2:DescribeInternetGateways",
+ "ec2:DeleteVolume",
+ "ec2:DescribeNetworkInterfaces",
+ "autoscaling:DescribeAutoScalingGroups",
+ "ec2:DescribeVolumes",
+ "ec2:DeleteInternetGateway",
+ "ec2:DescribeNetworkAcls",
+ "ec2:DescribeRouteTables",
+ "ec2:DeleteNetworkAcl",
+ "ec2:ReleaseAddress",
+ "ec2:AssociateDhcpOptions",
+ "ec2:TerminateInstances",
+ "ec2:DetachNetworkInterface",
+ "ec2:DescribeTags",
+ "ec2:DescribeVpcPeeringConnections",
+ "ec2:ModifyNetworkInterfaceAttribute",
+ "ec2:DeleteNetworkInterface",
+ "ec2:DetachInternetGateway",
+ "ec2:DescribeNatGateways",
+ "cloudwatch:GetMetricStatistics",
+ "ec2:StopInstances",
+ "ec2:DisassociateRouteTable",
+ "ec2:DescribeSecurityGroups",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:DescribeImages",
+ "ec2:DescribeVpcs",
+ "ec2:DeleteSecurityGroup",
+ "ec2:DescribeInstanceTypes",
+ "ec2:DeleteDhcpOptions",
+ "ec2:DeleteNatGateway",
+ "ec2:DescribeVpcEndpoints",
+ "ec2:DeleteVpc",
+ "ec2:DescribeSubnets"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "LoadBalancer",
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:DescribeTags",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:DescribeLoadBalancers"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "IAM",
+ "Effect": "Allow",
+ "Action": [
+ "iam:GetRole",
+ "iam:DeleteAccessKey",
+ "iam:DeleteGroup",
+ "iam:TagRole",
+ "iam:DeleteUserPolicy",
+ "iam:ListRoles",
+ "iam:DeleteUser",
+ "iam:ListUserPolicies",
+ "iam:CreateUser",
+ "iam:TagUser",
+ "sts:AssumeRole",
+ "iam:RemoveUserFromGroup",
+ "iam:GetUserPolicy",
+ "iam:ListAttachedRolePolicies",
+ "iam:ListUsers",
+ "iam:GetUser",
+ "iam:ListAccessKeys",
+ "iam:ListRolePolicies",
+ "iam:ListAccountAliases"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Pricing",
+ "Effect": "Allow",
+ "Action": "pricing:GetProducts",
+ "Resource": "*"
+ },
+ {
+ "Sid": "S3Bucket",
+ "Effect": "Allow",
+ "Action": [
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:ListAllMyBuckets",
+ "s3:CreateBucket",
+ "s3:ListBucket",
+ "s3:PutObjectTagging",
+ "s3:DeleteObject",
+ "s3:DeleteBucket",
+ "s3:putBucketTagging",
+ "s3:GetBucketTagging",
+ "s3:GetBucketLocation"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "CloudTrail",
+ "Effect": "Allow",
+ "Action": [
+ "cloudtrail:LookupEvents",
+ "cloudtrail:ListTrails"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "CloudWatch",
+ "Effect": "Allow",
+ "Action": "cloudwatch:GetMetricData",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/iam/clouds/aws/delete/CloudGovernanceEC2Policy b/iam/clouds/aws/delete/CloudGovernanceEC2Policy
index 5d6f676a..b7310a80 100644
--- a/iam/clouds/aws/delete/CloudGovernanceEC2Policy
+++ b/iam/clouds/aws/delete/CloudGovernanceEC2Policy
@@ -30,42 +30,53 @@
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": [
+ "ec2:DeregisterImage",
+ "ec2:DeleteSubnet",
+ "ec2:DeleteSnapshot",
"ec2:DescribeAddresses",
"ec2:DescribeInstances",
- "ec2:DescribeTags",
+ "ec2:DeleteVpcEndpoints",
+ "ec2:DeleteVpcPeeringConnection",
+ "autoscaling:DescribeLaunchConfigurations",
"ec2:DescribeRegions",
+ "ec2:CreateImage",
+ "ec2:CreateVpc",
"ec2:DescribeDhcpOptions",
- "ec2:DescribeNatGateways",
- "cloudwatch:GetMetricStatistics",
"ec2:DescribeSnapshots",
- "ec2:DescribeSecurityGroups",
- "ec2:DescribeImages",
+ "ec2:DeleteRouteTable",
"ec2:DescribeInternetGateways",
+ "ec2:DeleteVolume",
"ec2:DescribeNetworkInterfaces",
- "ec2:DescribeVpcs",
+ "autoscaling:DescribeAutoScalingGroups",
"ec2:DescribeVolumes",
- "ec2:DescribeVpcEndpoints",
- "ec2:DescribeSubnets",
+ "ec2:DeleteInternetGateway",
"ec2:DescribeNetworkAcls",
"ec2:DescribeRouteTables",
- "ec2:DeleteNatGateway",
- "ec2:DetachInternetGateway",
- "ec2:DeleteInternetGateway",
+ "ec2:DeleteNetworkAcl",
+ "ec2:ReleaseAddress",
"ec2:AssociateDhcpOptions",
- "ec2:DeleteDhcpOptions",
+ "ec2:TerminateInstances",
+ "ec2:DetachNetworkInterface",
+ "ec2:DescribeTags",
+ "ec2:DescribeVpcPeeringConnections",
+ "ec2:ModifyNetworkInterfaceAttribute",
+ "ec2:DeleteNetworkInterface",
+ "ec2:DetachInternetGateway",
+ "ec2:DescribeNatGateways",
+ "cloudwatch:GetMetricStatistics",
+ "ec2:StopInstances",
+ "ec2:DisassociateRouteTable",
+ "ec2:DescribeSecurityGroups",
"ec2:RevokeSecurityGroupIngress",
+ "ec2:DescribeImages",
+ "ec2:DescribeVpcs",
"ec2:DeleteSecurityGroup",
- "ec2:DeleteRouteTable",
- "ec2:DisassociateRouteTable",
- "ec2:ReleaseAddress",
- "ec2:DeleteSubnet",
+ "ec2:DescribeInstanceTypes",
+ "ec2:DeleteDhcpOptions",
+ "ec2:DeleteNatGateway",
+ "ec2:DescribeVpcEndpoints",
"ec2:DeleteVpc",
- "ec2:DeleteVpcEndpoints",
- "ec2:DetachNetworkInterface",
- "ec2:DeleteNetworkInterface",
- "ec2:ModifyNetworkInterfaceAttribute",
- "ec2:DeleteNetworkAcl",
- "ec2:createVpc"
+ "ec2:DescribeSubnets"
],
"Resource": "*"
}
diff --git a/iam/clouds/aws/delete/CloudGovernanceS3Policy b/iam/clouds/aws/delete/CloudGovernanceS3Policy
index 77c1afff..b22fb251 100644
--- a/iam/clouds/aws/delete/CloudGovernanceS3Policy
+++ b/iam/clouds/aws/delete/CloudGovernanceS3Policy
@@ -14,7 +14,8 @@
"s3:DeleteObject",
"s3:DeleteBucket",
"s3:putBucketTagging",
- "s3:GetBucketTagging"
+ "s3:GetBucketTagging",
+ "s3:GetBucketLocation"
],
"Resource": "*"
}
diff --git a/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy b/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy
index 8b27c651..09d89c17 100644
--- a/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy
+++ b/iam/clouds/aws/not_delete/CloudGovernanceEC2Policy
@@ -33,6 +33,7 @@
"ec2:DescribeAddresses",
"ec2:DescribeInstances",
"ec2:DescribeTags",
+ "ec2:DescribeVpcPeeringConnections",
"ec2:DescribeRegions",
"ec2:DescribeDhcpOptions",
"ec2:DescribeNatGateways",
@@ -44,7 +45,10 @@
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeVpcs",
"ec2:DescribeVolumes",
+ "ec2:DescribeInstanceTypes",
+ "ec2:createVpc",
"ec2:DescribeVpcEndpoints",
+ "ec2:DeleteVpc",
"ec2:DescribeSubnets",
"ec2:DescribeNetworkAcls",
"ec2:DescribeRouteTables"
diff --git a/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json
new file mode 100644
index 00000000..a244a577
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadPolicy.json
@@ -0,0 +1,44 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": [
+ "cur:*",
+ "ce:*",
+ "account:GetAccountInformation",
+ "aws-portal:ViewBilling",
+ "billing:GetBillingData",
+ "billing:GetBillingDetails",
+ "billing:GetBillingNotifications",
+ "billing:GetBillingPreferences",
+ "billing:GetCredits",
+ "billing:GetContractInformation",
+ "billing:GetIAMAccessPreference",
+ "billing:GetSellerOfRecord",
+ "billing:ListBillingViews",
+ "consolidatedbilling:ListLinkedAccounts",
+ "consolidatedbilling:GetAccountBillingRole",
+ "freetier:GetFreeTierAlertPreference",
+ "freetier:GetFreeTierUsage",
+ "invoicing:GetInvoiceEmailDeliveryPreferences",
+ "invoicing:GetInvoicePDF",
+ "invoicing:ListInvoiceSummaries",
+ "payments:GetPaymentInstrument",
+ "payments:GetPaymentStatus",
+ "payments:ListPaymentPreferences",
+ "purchase-orders:GetPurchaseOrder",
+ "purchase-orders:ViewPurchaseOrders",
+ "purchase-orders:ListPurchaseOrderInvoices",
+ "purchase-orders:ListPurchaseOrders",
+ "tax:GetTaxRegistrationDocument",
+ "tax:GetTaxInheritance",
+ "tax:ListTaxRegistrations",
+ "savingsplans:Describe*",
+ "savingsplans:List*"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json
new file mode 100644
index 00000000..49005e92
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/CloudGovernanceCostExplorerReadRole.json
@@ -0,0 +1,14 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::452958939641:user/athiruma"
+ ]
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/iam/clouds/aws/payer_roles/README.md b/iam/clouds/aws/payer_roles/README.md
new file mode 100644
index 00000000..2c3f768c
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/README.md
@@ -0,0 +1,57 @@
+## Create IAM Assume Role
+
+# From AWS Console
+
+### Go to **IAM** Service
+
+#### Create a IAM Policy
+
+1. Click on **Policies**
+2. Click on **Create Policy**
+3. Switch to JSON tab and paste the contents of CloudGovernanceCostExplorerReadPolicy.
+4. Click on Next: Tags
+5. Click on Next: Review
+6. Enter the Policy name as CloudGovernanceCostExplorerReadPolicy
+7. Click on **Create Policy**. ( Policy will be created and listed on Policies )
+
+#### Create the IAM Role
+
+1. Click on **Roles**
+2. Click on **Create Role**
+3. Select the **Custom trust policy** from Trusted identity type.
+4. Paste the contents of *CloudGovernanceCostExplorerReadRole.json* file. \
+ Note: Replace username with **IAM User** name, and AccountId with the **AWS AccountId**
+5. Select the **CloudGovernanceCostExplorerReadPolicy** from the list of policies.
+6. Enter the RoleName as **CloudGovernanceCostExplorerReadRole**
+7. Click on create role. ( Role will be created and listed on roles )
+
+
+## From Terraform provider
+
+Clone our GitHub repository or copy the folder of **payer_roles**.
+if you clone repo path: iam/clouds/aws/payer_roles/terrafom_create_role/
+else path: payer_roles/terrafom_create_role/main.tf
+
+Go to the folder terrafom_create_role (note: the directory name in this repository is spelled this way) and open a terminal.
+
+Configure the aws cli credentials
+```commandline
+aws configure
+```
+
+Install [Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) if you don't have it previously
+
+Run the following commands to create the IAM Role and attach policy to it.
+```commandline
+terraform init
+terraform apply
+```
+
+Note: Replace username with **IAM User** name, and AccountId with the **AWS AccountId** on the CloudGovernanceCostExplorerReadRole
+
+Then share your **AccountId** and **Role Name** to the users for accessing the CostExplorer.
+
+To delete the IAM role and policy through terraform.
+```commandline
+terraform destroy
+```
\ No newline at end of file
diff --git a/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf b/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf
new file mode 100644
index 00000000..b3e7dd03
--- /dev/null
+++ b/iam/clouds/aws/payer_roles/terrafom_create_role/main.tf
@@ -0,0 +1,24 @@
+
+provider "aws" {
+
+}
+
+output "role_arn" {
+ value = aws_iam_role.cloud_governance_ce_read_role.arn
+}
+
+output "role_name" {
+ value = aws_iam_role.cloud_governance_ce_read_role.name
+}
+
+resource "aws_iam_role" "cloud_governance_ce_read_role" {
+
+ name = "CloudGovernanceCostExplorerReadRole"
+
+ assume_role_policy = file("./../CloudGovernanceCostExplorerReadRole.json")
+ inline_policy {
+ name = "CloudGovernanceCostExplorerReadPolicy"
+ policy = file("./../CloudGovernanceCostExplorerReadPolicy.json")
+ }
+
+}
diff --git a/images/CloudResourceOrchestration.jpg b/images/CloudResourceOrchestration.jpg
new file mode 100644
index 00000000..bc9b46cb
Binary files /dev/null and b/images/CloudResourceOrchestration.jpg differ
diff --git a/images/jenkins/add_creds.png b/images/jenkins/add_creds.png
new file mode 100644
index 00000000..b6af48a1
Binary files /dev/null and b/images/jenkins/add_creds.png differ
diff --git a/images/jenkins/jenkins_config_file.png b/images/jenkins/jenkins_config_file.png
new file mode 100644
index 00000000..7414b002
Binary files /dev/null and b/images/jenkins/jenkins_config_file.png differ
diff --git a/images/jenkins/manage_jenkins.png b/images/jenkins/manage_jenkins.png
new file mode 100644
index 00000000..07c00312
Binary files /dev/null and b/images/jenkins/manage_jenkins.png differ
diff --git a/images/jenkins/manage_nodes.png b/images/jenkins/manage_nodes.png
new file mode 100644
index 00000000..a601d85c
Binary files /dev/null and b/images/jenkins/manage_nodes.png differ
diff --git a/images/jenkins/new_node.png b/images/jenkins/new_node.png
new file mode 100644
index 00000000..99b94cfd
Binary files /dev/null and b/images/jenkins/new_node.png differ
diff --git a/images/jenkins/slave_node.png b/images/jenkins/slave_node.png
new file mode 100644
index 00000000..cdf686d7
Binary files /dev/null and b/images/jenkins/slave_node.png differ
diff --git a/jenkins/README.md b/jenkins/README.md
new file mode 100644
index 00000000..b457e540
--- /dev/null
+++ b/jenkins/README.md
@@ -0,0 +1,142 @@
+## Configure Jenkins slave to master
+
+### Install Java-11 based on OS
+rhel8
+```commandline
+sudo yum install java-11-openjdk-devel
+```
+amazon-linux
+```commandline
+yum install java-11-amazon-corretto-devel.x86_64
+```
+
+### Install Docker on CentOS/ Fedora
+Centos: https://docs.docker.com/engine/install/centos/
+Fedora: https://docs.docker.com/engine/install/fedora/
+
+### Installing on Fedora
+```
+sudo dnf -y install dnf-plugins-core
+sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+sudo dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+sudo systemctl start docker
+```
+
+### Add Jenkins user
+```commandline
+useradd jenkins -U -s /bin/bash
+```
+Set password
+```commandline
+passwd jenkins
+```
+ex: test123
+
+### Add Jenkins to the sudoers file
+
+$ vi /etc/sudoers
+```commandline
+jenkins ALL=(ALL) NOPASSWD: ALL
+```
+
+### Create a Key-Pair
+```commandline
+ssh-keygen
+cat .ssh/id_rsa.pub >> .ssh/authorized_keys
+```
+
+### Connect to Jenkins Master with your node
+
+Open Jenkins master
+1. Manage Jenkins
+![Manage Jenkins](../images/jenkins/manage_jenkins.png)
+2. Manage Nodes and Clouds
+![ManageNodes](../images/jenkins/manage_nodes.png)
+3. New Node
+![NewNode](../images/jenkins/new_node.png)
+4. Add details
+![Details](../images/jenkins/slave_node.png)
+5. Configure Node
+![Config](../images/jenkins/jenkins_config_file.png)
+
+Click on add to add jenkins user and private_key
+![PrivateKey](../images/jenkins/add_creds.png)
+
+
+Give permissions to **jenkins** user to run docker daemon
+```commandline
+sudo chown jenkins:jenkins /var/run/docker.sock
+```
+Check docker working from jenkins user
+```commandline
+sudo su - jenkins
+docker images
+```
+
+Now you are ready to run the cloud-governance policies
+
+### Run the ElasticSearch, Grafana, Kibana as a container within the same network
+
+#### Using the docker engine
+```commandline
+# detached mode
+docker-compose -f docker_compose_file_path up -d
+# down the containers
+docker-compose -f docker_compose_file_path down
+```
+
+#### Using the podman
+
+create elasticsearch & grafana local persistence Directories
+
+Create and Allow Permissions
+```commandline
+CLOUD_GOVERNANCE_PATH=""
+mkdir -p $CLOUD_GOVERNANCE_PATH/grafana
+mkdir -p $CLOUD_GOVERNANCE_PATH/elasticsearch
+
+# Give permissions
+chmod 777 -R $CLOUD_GOVERNANCE_PATH/grafana
+chmod 777 $CLOUD_GOVERNANCE_PATH/elasticsearch
+```
+
+```commandline
+# Run the containers in pods
+podman play kube file.yml
+# Delete the containers in pods
+podman play kube --down file.yml
+```
+
+
+## How to create a new user and s3 bucket on AWS
+
+Go to the IAM service to create the policy and user
+
+1. Create Policy named CloudGovernancePolicy
+ 2. Use [CloudGovernanceDeletePolicy.json](..%2Fiam%2Fclouds%2Faws%2FCloudGovernanceDeletePolicy.json) to create the policy
+2. Create cloud-governance-user
+3. Attach the CloudGovernancePolicy to cloud-governance-user.
+4. Create s3 bucket: cloud-governance-*
+
+
+#### How to pass aws credentials to jenkins job
+
+1. Create the json file with below format and save it in local env.
+2. Create/Update the jenkins file credential
+
+
+```commandline
+{
+"account1": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+ "BUCKET" : "bucket_name"
+ },
+"account2": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+ "BUCKET" : "bucket_name"
+ }
+}
+```
+
diff --git a/jenkins/Step_by_Step.md b/jenkins/Step_by_Step.md
new file mode 100644
index 00000000..dfdd32c6
--- /dev/null
+++ b/jenkins/Step_by_Step.md
@@ -0,0 +1,89 @@
+# How to create a new user for cloud-governance
+1. Create a IAM policy CloudGovernanceDeletePolicy
+ 1. Use [CloudGovernanceDeletePolicy.json](iam/clouds/aws/CloudGovernanceDeletePolicy.json) to create the policy
+2. Create **cloud-governance-user** and add the above created policy.
+3. Create s3 bucket to store policy results.
+
+# Adding jenkins slave
+1. Install java-11-jdk
+ ```commandline
+ sudo yum install java-11-openjdk-devel
+ ```
+2. Install docker on [Fedora](https://docs.docker.com/engine/install/fedora/)
+ ```commandline
+ sudo dnf -y install dnf-plugins-core
+ sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+ sudo dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-compose
+ sudo systemctl start docker
+ ```
+3. Create Jenkins user
+ ```commandline
+ useradd jenkins -U -s /bin/bash
+ passwd jenkins
+ ```
+4. Add jenkins user to sudoers file
+ ```
+ $ vi /etc/sudoers
+ jenkins ALL=(ALL) NOPASSWD: ALL
+ ```
+5. Giving permissions to jenkins user to run docker container
+ ```
+ sudo chown jenkins:jenkins /var/run/docker.sock
+ ```
+
+6. Run the cloud_governance_stack [ ElasticSearch, Kibana, Grafana]
+ ```commandline
+ # using docker-compose.yml
+ # detached mode
+ docker-compose -f [docker_compose_file_path](jenkins/docker-compose.yml) up -d
+ # down the containers
+ docker-compose -f [docker_compose_file_path](jenkins/docker-compose.yml) down
+ ```
+
+# Connect Jenkins slave to master
+1. Go to the Jenkins master.
+2. Click on **Manage Jenkins**
+3. Click on **Manage Nodes and Clouds**
+4. Click on New Node
+5. Add details like node **Name**
+6. Configure Node
+ 1. Remote root directory: **/home/jenkins**
+ 2. LaunchMethod: Launch agents via ssh
+ 1. Host: **hostname**
+       2. Credentials: *select your creds from the drop-down*
+ 1. ADD CREDS: select kind as Username with password
+ 3. Host key Verification Strategy: _Non verifying Verification Strategy_
+ 4. Click on Advanced:
+ 1. Port: 22/
+ 2. JavaPath: /usr/bin/java
+7. Click on save.
+8. Check logs, if slave is connected to master or not.
+
+## How to add AWS Creds to jenkins master.
+1. Create a JSON file with below format and save it. [ Keep it safe ]
+ ```commandline
+ {
+ "account1": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+ "BUCKET" : "bucket_name"
+ },
+ "account2": {
+    "AWS_ACCESS_KEY_ID": "access_key",
+    "AWS_SECRET_ACCESS_KEY" : "access_secret",
+ "BUCKET" : "bucket_name"
+ }
+ }
+ ```
+2. Login into the jenkins console.
+3. Click on Manage Jenkins
+4. Select Manage Credentials
+5. Click on **System**, select the domain that your creds will be stored
+ 1. Add Credentials.
+ 1. Select **secret file**
+ 2. Give the Id
+ 3. Upload the json file
+ 2. Update Credentials
+ 1. Select the secret you want to upgrade.
+ 2. If it is a file secret.
+ 3. Upload the modified file.
\ No newline at end of file
diff --git a/jenkins/cloud_resource_orchestration/Jenkinsfile b/jenkins/cloud_resource_orchestration/Jenkinsfile
index b50df9cf..38dbd264 100644
--- a/jenkins/cloud_resource_orchestration/Jenkinsfile
+++ b/jenkins/cloud_resource_orchestration/Jenkinsfile
@@ -22,7 +22,10 @@ pipeline {
JIRA_TOKEN = credentials('JIRA_TOKEN')
JIRA_QUEUE = credentials('JIRA_QUEUE')
CLOUD_RESOURCE_ORCHESTRATION_INDEX = credentials('cloud-resource-orchestration-index')
-
+ CRO_REPLACED_USERNAMES = credentials('cloud_governance_cro_replaces_usernames')
+ CRO_PORTAL = credentials('cloud_governance_cro_portal')
+ CRO_COST_OVER_USAGE = credentials('cloud_governance_cro_cost_over_usage')
+ CRO_ES_INDEX = credentials('cloud_governance_cro_es_index')
contact1 = "ebattat@redhat.com"
contact2 = "athiruma@redhat.com"
}
@@ -34,17 +37,17 @@ pipeline {
}
stage('Initial Cleanup') {
steps {
- sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+                sh '''if [[ "$(podman images -q quay.io/athiru/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/athiru/cloud-governance 2> /dev/null); fi'''
}
}
- stage('Upload ElasticSearch') {
+ stage('Run the CloudResourceOrchestration') {
steps {
sh 'python3 jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py'
}
}
stage('Finalize Cleanup') {
steps {
- sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ sh '''if [[ "$(podman images -q quay.io/athiru/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/athiru/cloud-governance 2> /dev/null); fi'''
deleteDir()
}
}
diff --git a/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py b/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
index 654fec68..d33daae8 100644
--- a/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
+++ b/jenkins/cloud_resource_orchestration/run_cloud_resource_orchestration.py
@@ -16,25 +16,34 @@
JIRA_QUEUE = os.environ['JIRA_QUEUE']
special_user_mails = os.environ['CLOUD_GOVERNANCE_SPECIAL_USER_MAILS']
CLOUD_RESOURCE_ORCHESTRATION_INDEX = os.environ['CLOUD_RESOURCE_ORCHESTRATION_INDEX']
-
+CRO_REPLACED_USERNAMES = os.environ['CRO_REPLACED_USERNAMES']
+CRO_DEFAULT_ADMINS = ['athiruma', 'ebattat', 'natashba']
+CRO_PORTAL = os.environ['CRO_PORTAL']
+CRO_COST_OVER_USAGE = os.environ['CRO_COST_OVER_USAGE']
+CRO_ES_INDEX = os.environ['CRO_ES_INDEX']
es_index = CLOUD_RESOURCE_ORCHESTRATION_INDEX
input_vars_to_container = [{'account': 'perf-dept', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF,
- 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'CLOUD_NAME': 'aws'},
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'PUBLIC_CLOUD_NAME': 'AWS'},
{'account': 'perf-scale', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE,
- 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'CLOUD_NAME': 'aws'},
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'PUBLIC_CLOUD_NAME': 'AWS'},
{'account': 'psap', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PSAP,
- 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'CLOUD_NAME': 'aws'}]
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'PUBLIC_CLOUD_NAME': 'AWS'}]
+
+os.system('echo Run CloudResourceOrchestration in pre active region')
-print('Run LongRun in pre active region')
-regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ap-south-1']
+common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'CRO_ES_INDEX': CRO_ES_INDEX, 'log_level': 'INFO', 'LDAP_HOST_NAME': LDAP_HOST_NAME,
+ 'JIRA_QUEUE': JIRA_QUEUE, 'JIRA_TOKEN': JIRA_TOKEN, 'JIRA_USERNAME': JIRA_USERNAME, 'JIRA_URL': JIRA_URL,
+ 'CRO_COST_OVER_USAGE': CRO_COST_OVER_USAGE, 'CRO_PORTAL': CRO_PORTAL, 'CRO_DEFAULT_ADMINS': CRO_DEFAULT_ADMINS, 'CRO_REPLACED_USERNAMES': CRO_REPLACED_USERNAMES,
+ 'CE_PAYER_INDEX': 'cloud-governance-clouds-billing-reports', 'RUN_ACTIVE_REGIONS': True, 'AWS_DEFAULT_REGION': 'us-east-1', 'AWS_MAX_ATTEMPTS': 5, 'AWS_RETRY_MODE': 'standard'}
+# Added the AWS_MAX_ATTEMPTS, AWS_RETRY_MODE to handle the RateLimit Exception in aws api calls using boto3
+# for more information on throttle api calls: https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
+# AWS Default variables https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#:~:text=to%20use%20this.-,AWS_MAX_ATTEMPTS,-The%20total%20number
-common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': es_index, 'log_level': 'INFO', 'LDAP_HOST_NAME': LDAP_HOST_NAME,
- 'JIRA_QUEUE': JIRA_QUEUE, 'JIRA_TOKEN': JIRA_TOKEN, 'JIRA_USERNAME': JIRA_USERNAME, 'JIRA_URL': JIRA_URL, 'MANAGEMENT': True, 'special_user_mails': f"{special_user_mails}"}
combine_vars = lambda item: f'{item[0]}="{item[1]}"'
common_envs = list(map(combine_vars, common_input_vars.items()))
for input_vars in input_vars_to_container:
+ os.system(f"""echo Running on Account {input_vars.get("account").upper()}""")
envs = list(map(combine_vars, input_vars.items()))
- for region in regions:
- os.system(f"""podman run --net="host" --rm --name cloud_resource_orchestration -e MONITOR="long_run" -e AWS_DEFAULT_REGION="{region}" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --net="host" --rm --name cloud_resource_orchestration -e CLOUD_RESOURCE_ORCHESTRATION="True" -e EMAIL_ALERT="True" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py b/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
index ab8e365c..d9d7a1bb 100644
--- a/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
+++ b/jenkins/clouds/aws/daily/cost_explorer/run_upload_es.py
@@ -29,8 +29,7 @@
es_index_psap = 'cloud-governance-cost-explorer-psap'
es_index_perf_scale = 'cloud-governance-cost-explorer-perf-scale'
es_index_global = 'cloud-governance-cost-explorer-global'
-
-cost_tags = ['ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment']
+cost_tags = ['PurchaseType', 'ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment', 'User:Spot']
# Cost Explorer upload to ElasticSearch
cost_metric = 'UnblendedCost' # UnblendedCost/BlendedCost
@@ -45,7 +44,7 @@
# os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="psap" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
# os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-scale" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
-es_index_global = 'cloud-governance-cost-explorer-global-cost'
+es_index_global = 'cloud-governance-cost-explorer-perf-global-cost'
os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-dept" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="psap" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="perf-scale" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index_global}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile b/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile
index 888aa3cf..5ccc1b1e 100644
--- a/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile
+++ b/jenkins/clouds/aws/daily/org_cost_explorer/Jenkinsfile
@@ -16,6 +16,12 @@ pipeline {
AWS_ACCOUNT_ROLE = credentials('cloud-governance-aws-account-role')
COST_CENTER_OWNER = credentials('cloud-governance-cost-center-owner')
REPLACE_ACCOUNT_NAME = credentials('cloud-governance-replace-account-names')
+ PAYER_SUPPORT_FEE_CREDIT = credentials('cloud-governance-aws-payer-support-fee-credit')
+ AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT = credentials('AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT')
+ AWS_ACCESS_KEY_ID_ATHIRUMA_BOT = credentials('AWS_ACCESS_KEY_ID_ATHIRUMA_BOT')
+ S3_RESULTS_PATH = credentials('S3_RESULTS_PATH')
+ ATHENA_DATABASE_NAME = credentials('ATHENA_DATABASE_NAME')
+ ATHENA_TABLE_NAME = credentials('ATHENA_TABLE_NAME')
contact1 = "ebattat@redhat.com"
contact2 = "athiruma@redhat.com"
@@ -31,7 +37,7 @@ pipeline {
sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
}
}
- stage('Upload ElasticSearch') {
+ stage('Run the AWS Cost Reports') {
steps {
sh 'python3 jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py'
}
diff --git a/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py b/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py
index 2696c236..f9660ccc 100644
--- a/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py
+++ b/jenkins/clouds/aws/daily/org_cost_explorer/run_org_upload_es.py
@@ -11,17 +11,35 @@
AWS_ACCOUNT_ROLE = os.environ['AWS_ACCOUNT_ROLE']
COST_CENTER_OWNER = os.environ['COST_CENTER_OWNER']
REPLACE_ACCOUNT_NAME = os.environ['REPLACE_ACCOUNT_NAME']
+PAYER_SUPPORT_FEE_CREDIT = os.environ['PAYER_SUPPORT_FEE_CREDIT']
+AWS_ACCESS_KEY_ID_ATHIRUMA_BOT = os.environ['AWS_ACCESS_KEY_ID_ATHIRUMA_BOT']
+AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT = os.environ['AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT']
+S3_RESULTS_PATH = os.environ['S3_RESULTS_PATH']
+ATHENA_DATABASE_NAME = os.environ['ATHENA_DATABASE_NAME']
+ATHENA_TABLE_NAME = os.environ['ATHENA_TABLE_NAME']
-print("Updating the Org level cost billing reports")
+os.system('echo "Updating the Org level cost billing reports"')
# Cost Explorer upload to ElasticSearch
cost_metric = 'UnblendedCost' # UnblendedCost/BlendedCost
granularity = 'DAILY' # DAILY/MONTHLY/HOURLY
-common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-global-cost-billing-reports', 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS, 'COST_CENTER_OWNER': f"{COST_CENTER_OWNER}", 'REPLACE_ACCOUNT_NAME': REPLACE_ACCOUNT_NAME}
+common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-global-cost-billing-reports', 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS, 'COST_CENTER_OWNER': f"{COST_CENTER_OWNER}", 'REPLACE_ACCOUNT_NAME': REPLACE_ACCOUNT_NAME, 'PAYER_SUPPORT_FEE_CREDIT': PAYER_SUPPORT_FEE_CREDIT}
combine_vars = lambda item: f'{item[0]}="{item[1]}"'
common_input_vars['es_index'] = 'cloud-governance-clouds-billing-reports'
common_envs = list(map(combine_vars, common_input_vars.items()))
os.system(f"""podman run --rm --name cloud-governance -e policy="cost_explorer_payer_billings" -e AWS_ACCOUNT_ROLE="{AWS_ACCOUNT_ROLE}" -e account="PERF-DEPT" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e SPREADSHEET_ID="{COST_SPREADSHEET_ID}" -e {' -e '.join(common_envs)} -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" quay.io/ebattat/cloud-governance:latest""")
+
+
+os.system('echo "Run the Spot Analysis report over the account using AWS Athena"')
+os.system(f"""podman run --rm --name cloud-governance -e policy="spot_savings_analysis" -e account="pnt-payer" \
+-e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_ATHIRUMA_BOT}" \
+-e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_ATHIRUMA_BOT}" \
+-e es_host="{ES_HOST}" -e es_port="{ES_PORT}" \
+-e es_index="cloud-governance-clouds-billing-reports" \
+-e S3_RESULTS_PATH="{S3_RESULTS_PATH}" \
+-e ATHENA_DATABASE_NAME="{ATHENA_DATABASE_NAME}" \
+-e ATHENA_TABLE_NAME="{ATHENA_TABLE_NAME}" \
+quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/clouds/aws/daily/policies/run_policies.py b/jenkins/clouds/aws/daily/policies/run_policies.py
index 70685243..7b70f002 100644
--- a/jenkins/clouds/aws/daily/policies/run_policies.py
+++ b/jenkins/clouds/aws/daily/policies/run_policies.py
@@ -55,35 +55,61 @@ def get_policies(type: str = None):
policies.remove('cost_over_usage')
policies.remove('monthly_report')
policies.remove('cost_billing_reports')
+policies.remove('cost_explorer_payer_billings')
for region in regions:
for policy in policies:
# Delete zombie cluster resource every night dry_run=no
if policy == 'zombie_cluster_resource':
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
# running policies dry_run=no per every region, ebs_unattached, ec2_stop, ip_unattached, ec2_idle, nat_gateway_unused, zombie_snapshots
- elif policy in ('ec2_idle', 'nat_gateway_unused', 'zombie_snapshots', 'ec2_stop', 'ebs_unattached', 'ip_unattached'):
+ elif policy in ('zombie_snapshots', 'ebs_unattached'):
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ elif policy in ('ec2_idle', 'ec2_stop'):
os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ elif policy in ('nat_gateway_unused', 'ip_unattached'):
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
# running policies dry_run=no only one region, empty_roles, s3_inactive
elif policy in ('empty_roles', 's3_inactive') and region == 'us-east-1':
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e MANAGER_EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="no" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
# running policies dry_run=yes per every region ebs_in_use, ec2_run
else:
if policy not in ('empty_roles', 's3_inactive'):
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-DEPT" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PSAP" -e MANAGER_EMAIL_ALERT="False" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PSAP}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e EMAIL_ALERT="False" -e account="PERF-SCALE" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e special_user_mails="{special_user_mails}" -e account_admin="{account_admin}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_PERF_SCALE}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
# Update AWS IAM User tags from the spreadsheet
os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-DEPT" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PERF-SCALE" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
os.system(f"""podman run --rm --name cloud-governance --net="host" -e account="PSAP" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e account_admin="{account_admin}" -e special_user_mails="{special_user_mails}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e log_level="INFO" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" quay.io/ebattat/cloud-governance:latest""")
-# Gitleaks run on github not related to any aws account
+
+# Send Policy alerts to users
+accounts = [{'account': 'PERF-DEPT', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF,
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF, 'BUCKET_NAME': BUCKET_PERF},
+ {'account': 'PERF-SCALE', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE,
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE, 'BUCKET_NAME': BUCKET_PERF_SCALE},
+ {'account': 'PSAP', 'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID_DELETE_PSAP,
+ 'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY_DELETE_PSAP, 'BUCKET_NAME': BUCKET_PSAP}]
+policies.remove('ec2_run')
+policies.remove('ebs_in_use')
+remove_policies = ['ec2_run', 'ebs_in_use', 'zombie_cluster_resource', 'ec2_idle', 'skipped_resources', 'ec2_stop']  # policies that will not aggregate
+policies = [policy.replace('_', '-') for policy in policies if policy not in remove_policies]
+common_input_vars = {'PUBLIC_CLOUD_NAME': 'AWS', 'BUCKET_KEY': 'logs', 'KERBEROS_USERS': f"{special_user_mails}", 'LDAP_HOST_NAME': f"{LDAP_HOST_NAME}", 'log_level': "INFO", 'MAIL_ALERT_DAYS': "[4, 6, 7]", 'POLICY_ACTIONS_DAYS': "[7]", 'POLICIES_TO_ALERT': policies, 'es_host': ES_HOST, 'es_port': ES_PORT}
+combine_vars = lambda item: f'{item[0]}="{item[1]}"'
+common_envs = list(map(combine_vars, common_input_vars.items()))
+for account in accounts:
+ envs = list(map(combine_vars, account.items()))
+ os.system(f"""podman run --rm --name cloud-governance --net="host" -e policy="send_aggregated_alerts" -e {' -e '.join(envs)} -e {' -e '.join(common_envs)} -e DEFAULT_ADMINS="['athiruma']" quay.io/ebattat/cloud-governance:latest""")
+
+# Gitleaks run on github not related to any aws account
print("run gitleaks")
region = 'us-east-1'
policy = 'gitleaks'
diff --git a/jenkins/clouds/aws/hourly/tagging/tagging.py b/jenkins/clouds/aws/hourly/tagging/tagging.py
index 34d15e49..18670824 100644
--- a/jenkins/clouds/aws/hourly/tagging/tagging.py
+++ b/jenkins/clouds/aws/hourly/tagging/tagging.py
@@ -19,6 +19,6 @@
regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
for region in regions:
- os.system(f"""podman run --rm --name cloud-governance -e account="perf" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance -e account="psap" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_psap}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
- os.system(f"""podman run --rm --name cloud-governance -e account="perf-scale" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf_scale}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance -e account="perf" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf}" -e log_level="INFO" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance -e account="psap" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PSAP}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PSAP}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_psap}" -e log_level="INFO" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
+ os.system(f"""podman run --rm --name cloud-governance -e account="perf-scale" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_DELETE_PERF_SCALE}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_DELETE_PERF_SCALE}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_perf_scale}" -e "AWS_MAX_ATTEMPTS"="5" -e "AWS_RETRY_MODE"="standard" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile b/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile
new file mode 100644
index 00000000..e638348b
--- /dev/null
+++ b/jenkins/clouds/gcp/daily/cost_reports/Jenkinsfile
@@ -0,0 +1,58 @@
+pipeline {
+ agent {
+ docker {
+ label 'cloud-governance-worker'
+ image 'quay.io/athiru/centos-stream8-podman:latest'
+ args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged'
+ }
+ }
+ environment {
+ GCP_DATABASE_NAME = credentials('cloud-governance-gcp-database-name')
+ GCP_DATABASE_TABLE_NAME = credentials('cloud-governance-gcp-database-table-name')
+ ES_HOST = credentials('cloud-governance-es-host')
+ ES_PORT = credentials('cloud-governance-es-port')
+ COST_SPREADSHEET_ID = credentials('cloud-governance-cost-spreadsheet-id')
+ GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials')
+
+ contact1 = "ebattat@redhat.com"
+ contact2 = "athiruma@redhat.com"
+ }
+ stages {
+ stage('Checkout') { // Checkout (git clone ...) the projects repository
+ steps {
+ checkout scm
+ }
+ }
+ stage('Initial Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ }
+ }
+ stage('Upload ElasticSearch') {
+ steps {
+ sh 'python3 jenkins/clouds/gcp/daily/cost_reports/run_reports.py'
+ }
+ }
+ stage('Finalize Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ deleteDir()
+ }
+ }
+ }
+ post {
+ always {
+ deleteDir()
+ }
+ failure {
+ script {
+ msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})"
+ emailext body: """\
+ Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n
+ """,
+ subject: msg,
+ to: "${contact1}, ${contact2}"
+ }
+ }
+ }
+}
diff --git a/jenkins/clouds/gcp/daily/cost_reports/run_reports.py b/jenkins/clouds/gcp/daily/cost_reports/run_reports.py
new file mode 100644
index 00000000..e89d1647
--- /dev/null
+++ b/jenkins/clouds/gcp/daily/cost_reports/run_reports.py
@@ -0,0 +1,21 @@
+
+
+import os
+
+GCP_DATABASE_NAME = os.environ['GCP_DATABASE_NAME']
+GCP_DATABASE_TABLE_NAME = os.environ['GCP_DATABASE_TABLE_NAME']
+ES_HOST = os.environ['ES_HOST']
+ES_PORT = os.environ['ES_PORT']
+COST_SPREADSHEET_ID = os.environ['COST_SPREADSHEET_ID']
+GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
+
+print('Running the GCP cost billing reports')
+
+common_input_vars = {'es_host': ES_HOST, 'es_port': ES_PORT, 'es_index': 'cloud-governance-clouds-billing-reports',
+ 'log_level': 'INFO', 'GOOGLE_APPLICATION_CREDENTIALS': GOOGLE_APPLICATION_CREDENTIALS,
+ 'PUBLIC_CLOUD_NAME': 'GCP', 'SPREADSHEET_ID': COST_SPREADSHEET_ID,
+ 'GCP_DATABASE_NAME': GCP_DATABASE_NAME, 'GCP_DATABASE_TABLE_NAME': GCP_DATABASE_TABLE_NAME}
+
+combine_vars = lambda item: f'{item[0]}="{item[1]}"'
+common_envs = list(map(combine_vars, common_input_vars.items()))
+os.system(f"""podman run --rm --name cloud-governance -e policy="cost_billing_reports" -e {' -e '.join(common_envs)} -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/docker-compose.yml b/jenkins/docker-compose.yml
new file mode 100644
index 00000000..de6802e9
--- /dev/null
+++ b/jenkins/docker-compose.yml
@@ -0,0 +1,40 @@
+version: '2.2'
+
+services:
+ grafana:
+ image: grafana/grafana:8.2.0
+ container_name: grafana
+ ports:
+ - "3000:3000"
+ networks:
+ - monitoring-net
+ volumes:
+ - grafana-data:/var/lib/grafana
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ container_name: elasticsearch
+ ports:
+ - "9200:9200"
+ environment:
+ - discovery.type=single-node
+ - xpack.security.enabled=false
+ networks:
+ - monitoring-net
+ volumes:
+ - elasticsearch-data:/usr/share/elasticsearch/data
+
+ kibana:
+ image: docker.elastic.co/kibana/kibana:8.8.0
+ container_name: kibana
+ ports:
+ - "5601:5601"
+ environment:
+ - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
+ networks:
+ - monitoring-net
+networks:
+ monitoring-net:
+volumes:
+ elasticsearch-data:
+ grafana-data:
diff --git a/jenkins/poc/haim/daily/Jenkinsfile b/jenkins/poc/haim/daily/Jenkinsfile
new file mode 100644
index 00000000..616cb281
--- /dev/null
+++ b/jenkins/poc/haim/daily/Jenkinsfile
@@ -0,0 +1,62 @@
+pipeline {
+ options {
+ disableConcurrentBuilds()
+ }
+ agent {
+ docker {
+ label 'cloud-governance-worker'
+ image 'quay.io/athiru/centos-stream8-podman:latest'
+ args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged'
+ }
+ }
+ environment {
+ AWS_ACCESS_KEY_ID_APPENG = credentials('cloud-governance-aws-access-key-id-appeng')
+ AWS_SECRET_ACCESS_KEY_APPENG = credentials('cloud-governance-aws-secret-access-key-appeng')
+ AWS_IAM_USER_SPREADSHEET_ID = credentials('cloud-governance-aws-iam-user-spreadsheet-id')
+ GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials')
+ LDAP_HOST_NAME = credentials('cloud-governance-ldap-host-name')
+ ES_HOST = credentials('cloud-governance-es-host')
+ ES_PORT = credentials('cloud-governance-es-port')
+ BUCKET_APPENG = credentials('cloud-governance-bucket-appeng')
+ contact1 = "ebattat@redhat.com"
+ contact2 = "athiruma@redhat.com"
+ }
+ stages {
+ stage('Checkout') { // Checkout (git clone ...) the projects repository
+ steps {
+ checkout scm
+ }
+ }
+ stage('Initial Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ }
+ }
+ stage('Run Policies for haim poc') {
+ steps {
+ sh 'python3 jenkins/poc/haim/daily/run_policies.py'
+ }
+ }
+ stage('Upload Policies output to ElasticSearch for haim poc') {
+ steps {
+ sh 'python3 jenkins/poc/haim/daily/es_upload.py'
+ }
+ }
+ stage('Finalize Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ deleteDir()
+ }
+ }
+ }
+ post {
+ failure {
+ script {
+ msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})"
+ emailext body: """\
+ Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n
+ """,subject: msg, to: "${contact1}, ${contact2}, ${contact3}"
+ }
+ }
+ }
+}
diff --git a/jenkins/poc/haim/daily/es_upload.py b/jenkins/poc/haim/daily/es_upload.py
new file mode 100644
index 00000000..65987c78
--- /dev/null
+++ b/jenkins/poc/haim/daily/es_upload.py
@@ -0,0 +1,45 @@
+
+import os
+
+
+AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG']
+AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG']
+LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME']
+BUCKET_APPENG = os.environ['BUCKET_APPENG']
+ES_HOST = os.environ['ES_HOST']
+ES_PORT = os.environ['ES_PORT']
+LOGS = os.environ.get('LOGS', 'logs')
+
+
+def get_policies(type: str = None):
+ """
+ This method return a list of policies name without extension, that can filter by type
+ @return: list of policies name
+ """
+ policies = []
+ policies_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), 'cloud_governance', 'policy', 'aws')
+ for (dirpath, dirnames, filenames) in os.walk(policies_path):
+ for filename in filenames:
+ if not filename.startswith('__') and (filename.endswith('.yml') or filename.endswith('.py')):
+ if not type:
+ policies.append(os.path.splitext(filename)[0])
+ elif type and type in filename:
+ policies.append(os.path.splitext(filename)[0])
+ return policies
+
+
+regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
+
+os.system('echo "Upload data to ElasticSearch - ec2 index"')
+
+es_index = 'cloud-governance-appeng-ec2-index'
+es_doc_type = '_doc'
+for region in regions:
+ for policy_types in ['ec2', 'zombie', 'ebs', 'empty_roles', 's3', 'ip', 'nat_gateway_unused']:
+ policies = get_policies(type=policy_types)
+ for policy in policies:
+ if policy in ('empty_roles', 's3_inactive'):
+ if region == 'us-east-1':
+ os.system(f"""podman run --rm --name cloud-governance-poc-haim -e upload_data_es="upload_data_es" -e account="APPENG" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index}" -e es_doc_type="{es_doc_type}" -e bucket="{BUCKET_APPENG}" -e policy="{policy}" -e AWS_DEFAULT_REGION="{region}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ else:
+ os.system(f"""podman run --rm --name cloud-governance-poc-haim -e upload_data_es="upload_data_es" -e account="APPENG" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{es_index}" -e es_doc_type="{es_doc_type}" -e bucket="{BUCKET_APPENG}" -e policy="{policy}" -e AWS_DEFAULT_REGION="{region}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/poc/haim/daily/run_policies.py b/jenkins/poc/haim/daily/run_policies.py
new file mode 100644
index 00000000..c6aedc4a
--- /dev/null
+++ b/jenkins/poc/haim/daily/run_policies.py
@@ -0,0 +1,53 @@
+
+import os
+
+
+AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG']
+AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG']
+LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME']
+LOGS = os.environ.get('LOGS', 'logs')
+ES_HOST = os.environ['ES_HOST']
+ES_PORT = os.environ['ES_PORT']
+BUCKET_APPENG = os.environ['BUCKET_APPENG']
+
+
+def get_policies(type: str = None):
+ """
+ This method returns a list of policy names without extensions, optionally filtered by type
+ @return: list of policies name
+ """
+ policies = []
+ policies_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), 'cloud_governance', 'policy', 'aws')
+ for (dirpath, dirnames, filenames) in os.walk(policies_path):
+ for filename in filenames:
+ if not filename.startswith('__') and (filename.endswith('.yml') or filename.endswith('.py')):
+ if not type:
+ policies.append(os.path.splitext(filename)[0])
+ elif type and type in filename:
+ policies.append(os.path.splitext(filename)[0])
+ return policies
+
+
+regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
+policies = get_policies()
+not_action_policies = ['cost_explorer', 'cost_over_usage', 'monthly_report', 'cost_billing_reports', 'cost_explorer_payer_billings']
+run_policies = list(set(policies) - set(not_action_policies))
+run_policies.sort()
+
+
+os.system(f"""echo Running the cloud_governance policies: {run_policies}""")
+os.system(f"""echo "Running the CloudGovernance policies" """)
+for region in regions:
+ for policy in run_policies:
+ if policy in ('empty_roles', 's3_inactive') and region == 'us-east-1':
+ os.system(f"""podman run --rm --name cloud-governance-poc-haim --net="host" -e MANAGER_EMAIL_ALERT="False" -e EMAIL_ALERT="False" -e account="APPENG" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_APPENG}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+ else:
+ os.system(f"""podman run --rm --name cloud-governance-poc-haim --net="host" -e MANAGER_EMAIL_ALERT="False" -e EMAIL_ALERT="False" -e account="APPENG" -e policy="{policy}" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e AWS_DEFAULT_REGION="{region}" -e dry_run="yes" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e policy_output="s3://{BUCKET_APPENG}/{LOGS}/{region}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+
+
+cost_tags = ['PurchaseType', 'ChargeType', 'User', 'Budget', 'Project', 'Manager', 'Owner', 'LaunchTime', 'Name', 'Email', 'Environment', 'User:Spot']
+cost_metric = 'UnblendedCost' # UnblendedCost/BlendedCost
+granularity = 'DAILY' # DAILY/MONTHLY/HOURLY
+cost_explorer_index = 'cloud-governance-haim-cost-explorer-global-index'
+os.system(f"""echo "Running the CloudGovernance CostExplorer Policies" """)
+os.system(f"""podman run --rm --name cloud-governance -e AWS_DEFAULT_REGION="us-east-1" -e account="appeng" -e policy="cost_explorer" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e es_host="{ES_HOST}" -e es_port="{ES_PORT}" -e es_index="{cost_explorer_index}" -e cost_explorer_tags="{cost_tags}" -e granularity="{granularity}" -e cost_metric="{cost_metric}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/poc/haim/hourly/Jenkinsfile b/jenkins/poc/haim/hourly/Jenkinsfile
new file mode 100644
index 00000000..696a043a
--- /dev/null
+++ b/jenkins/poc/haim/hourly/Jenkinsfile
@@ -0,0 +1,54 @@
+pipeline {
+ options {
+ disableConcurrentBuilds()
+ }
+ agent {
+ docker {
+ label 'cloud-governance-worker'
+ image 'quay.io/athiru/centos-stream8-podman:latest'
+ args '-u root -v /etc/postfix/main.cf:/etc/postfix/main.cf --privileged'
+ }
+ }
+ environment {
+ AWS_ACCESS_KEY_ID_APPENG = credentials('cloud-governance-aws-access-key-id-appeng')
+ AWS_SECRET_ACCESS_KEY_APPENG = credentials('cloud-governance-aws-secret-access-key-appeng')
+ AWS_IAM_USER_SPREADSHEET_ID = credentials('cloud-governance-aws-iam-user-spreadsheet-id')
+ GOOGLE_APPLICATION_CREDENTIALS = credentials('cloud-governance-google-application-credentials')
+ LDAP_HOST_NAME = credentials('cloud-governance-ldap-host-name')
+ contact1 = "ebattat@redhat.com"
+ contact2 = "athiruma@redhat.com"
+ }
+ stages {
+ stage('Checkout') { // Checkout (git clone ...) the projects repository
+ steps {
+ checkout scm
+ }
+ }
+ stage('Initial Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ }
+ }
+ stage('Run Tagging Cluster & Non-Cluster') {
+ steps {
+ sh 'python3 jenkins/poc/haim/hourly/run_policies.py'
+ }
+ }
+ stage('Finalize Cleanup') {
+ steps {
+ sh '''if [[ "$(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null)" != "" ]]; then podman rmi -f $(podman images -q quay.io/ebattat/cloud-governance 2> /dev/null); fi'''
+ deleteDir()
+ }
+ }
+ }
+ post {
+ failure {
+ script {
+ msg = "Build error for ${env.JOB_NAME} ${env.BUILD_NUMBER} (${env.BUILD_URL})"
+ emailext body: """\
+ Jenkins job: ${env.BUILD_URL}\nSee the console output for more details: ${env.BUILD_URL}consoleFull\n\n
+ """,subject: msg, to: "${contact1}, ${contact2}"
+ }
+ }
+ }
+}
diff --git a/jenkins/poc/haim/hourly/run_policies.py b/jenkins/poc/haim/hourly/run_policies.py
new file mode 100644
index 00000000..173483ff
--- /dev/null
+++ b/jenkins/poc/haim/hourly/run_policies.py
@@ -0,0 +1,23 @@
+
+import os
+
+
+AWS_ACCESS_KEY_ID_APPENG = os.environ['AWS_ACCESS_KEY_ID_APPENG']
+AWS_SECRET_ACCESS_KEY_APPENG = os.environ['AWS_SECRET_ACCESS_KEY_APPENG']
+LDAP_HOST_NAME = os.environ['LDAP_HOST_NAME']
+GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
+SPREADSHEET_ID = os.environ['AWS_IAM_USER_SPREADSHEET_ID']
+
+
+LOGS = os.environ.get('LOGS', 'logs')
+
+mandatory_tags_appeng = {'Budget': 'APPENG'}
+
+os.system(f"""echo "Running the tag_iam_user" """)
+os.system(f"""podman run --rm --name cloud-governance-poc-haim --net="host" -e account="APPENG" -e EMAIL_ALERT="False" -e policy="tag_iam_user" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e user_tag_operation="update" -e SPREADSHEET_ID="{SPREADSHEET_ID}" -e GOOGLE_APPLICATION_CREDENTIALS="{GOOGLE_APPLICATION_CREDENTIALS}" -v "{GOOGLE_APPLICATION_CREDENTIALS}":"{GOOGLE_APPLICATION_CREDENTIALS}" -e LDAP_HOST_NAME="{LDAP_HOST_NAME}" -e log_level="INFO" quay.io/ebattat/cloud-governance:latest""")
+
+
+os.system(f"""echo "Running the tag_resources" """)
+regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
+for region in regions:
+ os.system(f"""podman run --rm --name cloud-governance-poc-haim -e account="APPENG" -e EMAIL_ALERT="False" -e policy="tag_resources" -e AWS_ACCESS_KEY_ID="{AWS_ACCESS_KEY_ID_APPENG}" -e AWS_SECRET_ACCESS_KEY="{AWS_SECRET_ACCESS_KEY_APPENG}" -e AWS_DEFAULT_REGION="{region}" -e tag_operation="update" -e mandatory_tags="{mandatory_tags_appeng}" -e log_level="INFO" -v "/etc/localtime":"/etc/localtime" quay.io/ebattat/cloud-governance:latest""")
diff --git a/jenkins/podman_pod.yml b/jenkins/podman_pod.yml
new file mode 100644
index 00000000..8a050426
--- /dev/null
+++ b/jenkins/podman_pod.yml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cloud-governance-pod
+spec:
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ ports:
+ - containerPort: 9200
+ hostPort: 9200
+ env:
+ - name: discovery.type
+ value: "single-node"
+ - name: xpack.security.enabled
+ value: "false"
+ volumeMounts:
+ - mountPath: /usr/share/elasticsearch/data
+ name: elasticsearch-data
+ - name: kibana
+ image: docker.elastic.co/kibana/kibana:8.8.0
+ ports:
+ - containerPort: 5601
+ hostPort: 5601
+ env:
+ - name: ELASTICSEARCH_HOSTS
+ value: http://localhost:9200
+ - name: grafana
+ image: docker.io/grafana/grafana:8.2.0
+ ports:
+ - containerPort: 3000
+ hostPort: 3000
+ volumeMounts:
+ - mountPath: /var/lib/grafana
+ name: grafana-data
+ volumes:
+ - name: elasticsearch-data
+ hostPath:
+ path: $CLOUD_GOVERNANCE_PATH/elasticsearch
+ type: DirectoryOrCreate
+ - name: grafana-data
+ hostPath:
+ path: $CLOUD_GOVERNANCE_PATH/grafana
+ type: DirectoryOrCreate
+
+# replace CLOUD_GOVERNANCE_PATH with directory_name
diff --git a/requirements.txt b/requirements.txt
index 79174a42..9a6c7e74 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,29 +1,30 @@
+aiohttp==3.8.1
attrs==21.4.0
azure-identity==1.12.0
azure-mgmt-billing==6.0.0
azure-mgmt-costmanagement==3.0.0
azure-mgmt-subscription==3.1.1
-botocore==1.29.1
-boto3==1.26.1
+boto3==1.26.4
+botocore==1.29.4
elasticsearch==7.11.0
elasticsearch-dsl==7.4.0
google-api-python-client==2.57.0
google-auth-httplib2==0.1.0
google-auth-oauthlib==0.5.2
+google-cloud-bigquery==3.5.0
+google-cloud-billing==1.9.1
ibm_platform_services==0.27.0
myst-parser==0.17.0
+oauthlib~=3.1.1
pandas
+PyAthena[Pandas]==3.0.5
PyGitHub==1.55
-requests==2.27.1
+python-ldap==3.4.2
+requests==2.31.0
retry==0.9.2
SoftLayer==6.0.0
sphinx==4.5.0
sphinx-rtd-theme==1.0.0
-python-ldap==3.4.2
typing==3.7.4.3
typeguard==2.13.3
-
-# EC2 LongRun Required by Jira
-aiohttp==3.8.1
urllib3==1.26.7
-oauthlib~=3.1.1
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 4b6e6bd8..c3f5a1b3 100644
--- a/setup.py
+++ b/setup.py
@@ -2,13 +2,10 @@
from os import path
from setuptools import setup, find_packages
-
-__version__ = '1.1.74'
-
+__version__ = '1.1.146'
here = path.abspath(path.dirname(__file__))
-
if path.isfile(path.join(here, 'README.md')):
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
@@ -41,33 +38,37 @@
packages=find_packages(include=['cloud_governance', 'cloud_governance.*']),
install_requires=[
+ 'aiohttp==3.8.1', # required by jira
'attrs==21.4.0', # readthedocs
'azure-identity==1.12.0', # azure identity
+ 'azure-mgmt-billing==6.0.0', # azure billing management
'azure-mgmt-costmanagement==3.0.0', # azure cost management
'azure-mgmt-subscription==3.1.1', # azure subscriptions
- 'azure-mgmt-billing==6.0.0', # azure billing management
- 'botocore==1.29.1', # required by c7n 0.9.14
- 'boto3==1.26.1', # required by c7n 0.9.14
- 'elasticsearch==7.11.0', # depend on elasticsearch server
+ 'boto3==1.26.4', # required by c7n 0.9.14
+ 'botocore==1.29.4', # required by c7n 0.9.14
'elasticsearch-dsl==7.4.0',
+ 'elasticsearch==7.11.0', # depend on elasticsearch server
'google-api-python-client==2.57.0', # google drive
'google-auth-httplib2==0.1.0', # google drive
'google-auth-oauthlib==0.5.2', # google drive
+ 'google-cloud-bigquery==3.5.0', # google cloud cost
+ 'google-cloud-billing==1.9.1', # google cloud cost
'ibm_platform_services==0.27.0', # IBM Usage reports
'myst-parser==0.17.0', # readthedocs
+ 'oauthlib~=3.1.1', # required by jira
'pandas', # latest: aggregate ec2/ebs cluster data
+ 'PyAthena[Pandas]==3.0.5', # AWS Athena package
'PyGitHub==1.55', # gitleaks
'python-ldap==3.4.2', # prerequisite: sudo dnf install -y python39-devel openldap-devel gcc
- 'requests==2.27.1', # rest api & lambda
+ 'requests==2.31.0', # rest api & lambda
'retry==0.9.2',
'SoftLayer==6.0.0', # IBM SoftLayer
- 'sphinx==4.5.0', # readthedocs
'sphinx-rtd-theme==1.0.0', # readthedocs
- 'typing==3.7.4.3',
+ 'sphinx==4.5.0', # readthedocs
'typeguard==2.13.3', # checking types
- 'aiohttp==3.8.1', # required by jira
+ 'typing==3.7.4.3',
'urllib3==1.26.7', # required by jira
- 'oauthlib~=3.1.1', # required by jira
+
],
setup_requires=['pytest', 'pytest-runner', 'wheel', 'coverage'],
diff --git a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py
index 81b253d2..9a6eb190 100644
--- a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py
+++ b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py
@@ -1,3 +1,6 @@
+import datetime
+
+import pytest
from cloud_governance.common.clouds.azure.cost_management.cost_management_operations import CostManagementOperations
@@ -8,7 +11,13 @@ def test_get_usage():
@return:
"""
cost_management_operations = CostManagementOperations()
- cost_usage_data = cost_management_operations.get_usage(scope=cost_management_operations.azure_operations.scope)
+ end_date = datetime.datetime.utcnow() - datetime.timedelta(days=2)
+ start_date = end_date - datetime.timedelta(days=1)
+ granularity = 'Daily'
+ cost_usage_data = cost_management_operations.get_usage(scope=cost_management_operations.azure_operations.scope,
+ start_date=start_date, end_date=end_date,
+ granularity=granularity
+ )
assert cost_usage_data
@@ -18,5 +27,10 @@ def test_get_forecast():
@return:
"""
cost_management_operations = CostManagementOperations()
- cost_forecast_data = cost_management_operations.get_forecast(scope=cost_management_operations.azure_operations.scope)
+ end_date = datetime.datetime.utcnow() + datetime.timedelta(days=1)
+ start_date = end_date - datetime.timedelta(days=1)
+ granularity = 'Daily'
+ cost_forecast_data = cost_management_operations.get_forecast(scope=cost_management_operations.azure_operations.scope,
+ start_date=start_date, end_date=end_date,
+ granularity=granularity)
assert cost_forecast_data
diff --git a/tests/integration/cloud_governance/common/clouds/gcp/__init__.py b/tests/integration/cloud_governance/common/clouds/gcp/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py b/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py
new file mode 100644
index 00000000..f0d504bf
--- /dev/null
+++ b/tests/integration/cloud_governance/common/clouds/gcp/test_google_account.py
@@ -0,0 +1,27 @@
+from datetime import datetime, timedelta
+
+from cloud_governance.common.clouds.gcp.google_account import GoogleAccount
+from cloud_governance.main.environment_variables import environment_variables
+
+
+def test_query_list():
+ """
+ This method tests fetching data via BigQuery
+ :return:
+ """
+ environment_variables_dict = environment_variables.environment_variables_dict
+ database_name = environment_variables_dict.get('GCP_DATABASE_NAME')
+ database_table_name = environment_variables_dict.get('GCP_DATABASE_TABLE_NAME')
+ current_date = datetime.now() - timedelta(days=1)
+ month = str(current_date.month)
+ if len(month) != 2:
+ month = f'0{month}'
+ year = current_date.year
+ year_month = f'{year}{month}'
+ fetch_query = f"""SELECT invoice.month
+ FROM `{database_name}.{database_table_name}`
+ where invoice.month = '{year_month}' group by invoice.month"""
+ gcp_account = GoogleAccount()
+ result_year_month = gcp_account.query_list([fetch_query])[0][0].get('month')
+ assert result_year_month == year_month
+
diff --git a/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py b/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py
index d8c3a445..1e184392 100644
--- a/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py
+++ b/tests/integration/cloud_governance/common/elasticsearch/test_elasticsearch_operations.py
@@ -48,7 +48,7 @@ def test_delete_data_between_range():
es.delete_data_in_between_in_es(es_index=es_index, start_datetime=start_time, end_datetime=end_time)
start_time = end_time.replace(hour=0, minute=0, second=0)
end_time = datetime.datetime.now()
- assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1
+ assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1
es.delete_data_in_es(es_index=es_index)
@@ -63,10 +63,10 @@ def test_fetch_data_between_range():
time.sleep(3)
end_time = datetime.datetime.now()
start_time = (end_time - datetime.timedelta(1)).replace(hour=0, minute=0, second=0)
- assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1
+ assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 1
es.delete_data_in_es(es_index=es_index)
start_time = end_time - datetime.timedelta(1)
- assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0
+ assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0
es.delete_data_in_es(es_index=es_index)
@@ -78,4 +78,4 @@ def test_delete_data_in_elastic_search():
es.delete_data_in_es(es_index=es_index)
end_time = datetime.datetime.now().replace(hour=0, minute=0, second=0)
start_time = (end_time - datetime.timedelta(1)).replace(hour=0, minute=0, second=0)
- assert len(es.fetch_data_between_range(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0
+ assert len(es.fetch_data_by_es_query(es_index=es_index, start_datetime=start_time, end_datetime=end_time)) == 0
diff --git a/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py b/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py
index d868553a..8f245316 100644
--- a/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py
+++ b/tests/unittest/cloud_governance/aws/zombie_cluster/test_zombie_ec2_cluster_delete_resource.py
@@ -203,6 +203,7 @@ def test_delete_ec2_elastic_load_balancer_v2():
assert not EC2Operations(region_name).find_load_balancer_v2(elb_name='test-load-balancer-v2')
+@pytest.mark.skip(reason="Handled by ebs_unattached")
@mock_ec2
def test_delete_ebs_volume():
"""
diff --git a/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py b/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py
similarity index 92%
rename from tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py
rename to tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py
index 4e55df33..7ade0bd8 100644
--- a/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_zombie_nat_gateways.py
+++ b/tests/unittest/cloud_governance/aws/zombie_non_cluster/test_unused_nat_gateways.py
@@ -15,13 +15,13 @@ def test_nat_gateway_unused():
This method tests, deletion od unused of NatGateways
@return:
"""
- os.environ['policy'] = 'nat_gateway_unused'
+ os.environ['policy'] = 'unused_nat_gateway'
ec2_client = boto3.client('ec2', region_name=os.environ.get('AWS_DEFAULT_REGION'))
subnet_id = ec2_client.describe_subnets()['Subnets'][0].get('SubnetId')
ec2_client.create_nat_gateway(SubnetId=subnet_id)
nat_gateway_unused = NonClusterZombiePolicy()
nat_gateway_unused.set_dryrun(value='no')
- nat_gateway_unused.set_policy(value='nat_gateway_unused')
+ nat_gateway_unused.set_policy(value='unused_nat_gateway')
nat_gateway_unused.DAYS_TO_TRIGGER_RESOURCE_MAIL = -1
nat_gateway_unused._check_resource_and_delete(resource_name='Nat Gateway',
resource_id='NatGatewayId',
@@ -39,7 +39,7 @@ def test_nat_gateway_unused_not_delete():
This method tests, deletion od unused of NatGateways
@return:
"""
- os.environ['policy'] = 'nat_gateway_unused'
+ os.environ['policy'] = 'unused_nat_gateway'
tags = [
{'Key': 'Name', 'Value': 'CloudGovernanceTestZombieNatGateway'},
{'Key': 'Owner', 'Value': 'CloudGovernance'},
@@ -50,7 +50,7 @@ def test_nat_gateway_unused_not_delete():
ec2_client.create_nat_gateway(SubnetId=subnet_id, TagSpecifications=[{'ResourceType': 'nat-gateway', 'Tags': tags}])
nat_gateway_unused = NonClusterZombiePolicy()
nat_gateway_unused.set_dryrun(value='no')
- nat_gateway_unused.set_policy(value='nat_gateway_unused')
+ nat_gateway_unused.set_policy(value='unused_nat_gateway')
nat_gateway_unused.DAYS_TO_TRIGGER_RESOURCE_MAIL = -1
nat_gateway_unused._check_resource_and_delete(resource_name='Nat Gateway',
resource_id='NatGatewayId',
diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/__init__.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/__init__.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py
new file mode 100644
index 00000000..11032229
--- /dev/null
+++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py
@@ -0,0 +1,103 @@
+from datetime import datetime, timedelta
+from functools import wraps
+from unittest.mock import patch
+
+
+from cloud_governance.common.jira.jira_operations import JiraOperations
+
+
+def get_ticket_response():
+ """
+ This method return the ticket data
+ :return:
+ """
+ created = datetime.strftime(datetime.utcnow() - timedelta(days=2), "%Y-%m-%dT%H:%M:%S")
+ response = {
+ 'key': 'MOCK-1',
+ 'fields': {
+ 'status': {'name': 'Refinement'},
+ 'created': created,
+ 'description': "First Name: Test\n"
+ "Last Name: Mock\nEmail Address: mock@gmail.com\n"
+ "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n"
+ "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n"
+ "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n"
+ "Cost Estimation:12.0\nDetails: This is the test machine \n"
+ "ApprovedManager: mockapproval@gmail.com \n"
+ "Region: ap-south-1 \n"
+ }
+ }
+
+ return response
+
+
+def mock_get_issue(*args, **kwargs):
+ """
+ This method is mock for the get ticket data
+ :param kwargs:
+ :return:
+ """
+ if kwargs.get('ticket_id'):
+ return get_ticket_response()
+
+
+def mock_move_issue_state(*args, **kwargs):
+ """
+ This method is mocking for moving Jira tickets
+ :param kwargs:
+ :return:
+ """
+ if kwargs.get('ticket_id') and kwargs.get('state'):
+ return True
+ return False
+
+
+async def mock_get_all_issues(*args, **kwargs):
+ """
+ This method is mocking for search all tickets
+ :param args:
+ :param kwargs:
+ :return:
+ """
+ if kwargs.get('query'):
+ response = {
+ 'issues': {
+ 'key': 'MOCK-1',
+ 'fields': {
+ 'status': {'name': 'Refinement'},
+ 'created': datetime.utcnow() - timedelta(days=2),
+ 'description': "First Name: Test\n"
+ "Last Name: Mock\nEmail Address: mock@gmail.com\n"
+ "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n"
+ "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n"
+ "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n"
+ "Cost Estimation:12.0\nDetails: This is the test machine \n"
+ "ApprovedManager: mockapproval@gmail.com \n"
+ "Region: ap-south-1 \n"
+ }
+ }
+ }
+ return response
+
+
+def mock_jira(method):
+ """
+ This method is mocking for Jira class methods which are used in Jira Operations @param method:
+ @return:
+ """
+
+ @wraps(method)
+ def method_wrapper(*args, **kwargs):
+ """
+ This is the wrapper method to wraps the method inside the function
+ @param args:
+ @param kwargs:
+ @return:
+ """
+ with patch.object(JiraOperations, 'get_issue', mock_get_issue),\
+ patch.object(JiraOperations, 'move_issue_state', mock_move_issue_state), \
+ patch.object(JiraOperations, 'get_all_issues', mock_get_all_issues):
+ result = method(*args, **kwargs)
+ return result
+
+ return method_wrapper
diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py
new file mode 100644
index 00000000..83622979
--- /dev/null
+++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_monitor_cro_instances.py
@@ -0,0 +1,33 @@
+import boto3
+from moto import mock_ec2, mock_iam, mock_cloudtrail
+
+from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.monitor_cro_instances import MonitorCROInstances
+from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.tag_cro_instances import TagCROInstances
+from cloud_governance.main.environment_variables import environment_variables
+from tests.unittest.cloud_governance.cloud_resource_orchestration.mocks.mock_jira import mock_jira
+
+AWS_DEFAULT_REGION = 'ap-south-1'
+
+
+@mock_iam
+@mock_cloudtrail
+@mock_jira
+@mock_ec2
+def test_monitor_cro_instances():
+ """
+ This method verifies the cro data is returned or not
+ :return:
+ """
+ environment_variables_dict = environment_variables.environment_variables_dict
+ environment_variables_dict['JIRA_TOKEN'] = '123456mock'
+ tags = [{'Key': 'TicketId', 'Value': '1'}]
+ ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
+ default_ami_id = 'ami-03cf127a'
+ ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
+ TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
+ tag_cro_instances = TagCROInstances(region_name=AWS_DEFAULT_REGION)
+ tag_cro_instances.run()
+ monitor_data = MonitorCROInstances(region_name=AWS_DEFAULT_REGION)
+ actual_result = monitor_data.run()
+ expected_result = 1
+ assert len(actual_result) == expected_result
diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py
new file mode 100644
index 00000000..c081e0b5
--- /dev/null
+++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/test_tag_cro_instances.py
@@ -0,0 +1,30 @@
+import boto3
+from moto import mock_ec2, mock_cloudtrail, mock_iam
+
+from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.tag_cro_instances import TagCROInstances
+from cloud_governance.main.environment_variables import environment_variables
+from tests.unittest.cloud_governance.cloud_resource_orchestration.mocks.mock_jira import mock_jira
+
+AWS_DEFAULT_REGION = 'ap-south-1'
+
+
+@mock_iam
+@mock_cloudtrail
+@mock_jira
+@mock_ec2
+def test_tag_cro_instances():
+ """
+ This method tests the tagging of cro instances
+ :return:
+ """
+ environment_variables_dict = environment_variables.environment_variables_dict
+ environment_variables_dict['JIRA_TOKEN'] = '123456mock'
+ tags = [{'Key': 'TicketId', 'Value': '1'}]
+ ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
+ default_ami_id = 'ami-03cf127a'
+ ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
+ TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
+ tag_cro_instances = TagCROInstances(region_name=AWS_DEFAULT_REGION)
+ actual_response = tag_cro_instances.run()
+ expected_response = 1
+ assert len(actual_response) == expected_response
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/athena/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/athena/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py b/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py
new file mode 100644
index 00000000..7fde7264
--- /dev/null
+++ b/tests/unittest/cloud_governance/common/clouds/aws/athena/test_athena_operations.py
@@ -0,0 +1,14 @@
+from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations
+from tests.unittest.cloud_governance.common.clouds.aws.mocks.aws_mock import mock_athena
+
+
+@mock_athena
+def test_execute_query():
+ """
+ This method mock athena for the PyAthena
+ :return:
+ """
+ athena_operations = PyAthenaOperations()
+ actual_result = athena_operations.execute_query(query_string="select * from mock_table")
+ expected_result = [{'A': 1, 'B': 0}, {'A': 2, 'B': 1}, {'A': 3, 'B': 2}]
+ assert expected_result == actual_result
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/ec2/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/ec2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py b/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py
new file mode 100644
index 00000000..dd16a661
--- /dev/null
+++ b/tests/unittest/cloud_governance/common/clouds/aws/ec2/test_ec2_operations.py
@@ -0,0 +1,52 @@
+import boto3
+from moto import mock_ec2
+
+from cloud_governance.common.clouds.aws.ec2.ec2_operations import EC2Operations
+
+AWS_DEFAULT_REGION = 'ap-south-1'
+
+
+@mock_ec2
+def test_get_ec2_instance_list():
+ """
+ This method returns the list of ec2 instances
+ :return:
+ """
+ ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
+ ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION)
+ default_ami_id = 'ami-03cf127a'
+ tags = [{'Key': 'User', 'Value': 'cloud-governance'}]
+ ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
+ TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
+ assert type(ec2_operations.get_ec2_instance_list()[0]) == dict
+
+
+@mock_ec2
+def test_get_ec2_instance_ids():
+ """
+ This method tests the return the list instance_ids
+ :return:
+ """
+ ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
+ ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION)
+ tags = [{'Key': 'User', 'Value': 'cloud-governance'}]
+ default_ami_id = 'ami-03cf127a'
+ ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
+ TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
+ assert type(ec2_operations.get_ec2_instance_ids()[0]) == str
+
+
+@mock_ec2
+def test_tag_ec2_resources():
+ """
+ This method tests the method tagged instances by batch wise
+ :return:
+ """
+ ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
+ ec2_operations = EC2Operations(region=AWS_DEFAULT_REGION)
+ tags = [{'Key': 'User', 'Value': 'cloud-governance'}]
+ default_ami_id = 'ami-03cf127a'
+ for i in range(25):
+ ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1)
+ resource_ids = ec2_operations.get_ec2_instance_ids()
+ assert ec2_operations.tag_ec2_resources(client_method=ec2_client.create_tags, resource_ids=resource_ids, tags=tags) == 2
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/mocks/__init__.py b/tests/unittest/cloud_governance/common/clouds/aws/mocks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py b/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py
new file mode 100644
index 00000000..32f55ee1
--- /dev/null
+++ b/tests/unittest/cloud_governance/common/clouds/aws/mocks/aws_mock.py
@@ -0,0 +1,60 @@
+from functools import wraps
+from unittest.mock import patch
+
+import pandas
+
+from cloud_governance.common.clouds.aws.athena.abstract_athena_operations import AbstractAthenaOperations
+from cloud_governance.common.clouds.aws.athena.boto3_client_athena_operations import BotoClientAthenaOperations
+from cloud_governance.common.clouds.aws.athena.pyathena_operations import PyAthenaOperations
+
+
+class ParameterNotFoundException(Exception):
+ def __init__(self, parameter_name):
+ self.parameter_name = parameter_name
+ super().__init__(f"Parameter '{parameter_name}' not found.")
+
+ def __str__(self):
+ return f"ParameterNotFoundException: Parameter '{self.parameter_name}' not found."
+
+
+def mock_execute_query(cls, *args, **kwargs):
+ """
+    This method mocks the Athena execute_query operation
+ :param cls:
+ :param args:
+ :param kwargs:
+ :return:
+ """
+ if kwargs.get('query_string'):
+ data = {
+ "A": [1, 2, 3],
+ "B": [0, 1, 2]
+ }
+ df1 = pandas.DataFrame(data)
+ return df1.to_dict(orient='records')
+ else:
+ raise ParameterNotFoundException('query_string')
+
+
+def mock_athena(method):
+ """
+ Mocking aws athena
+ :param method:
+ :return:
+ """
+ @wraps(method)
+ def method_wrapper(*args, **kwargs):
+ """
+        This is the wrapper method that patches the Athena operations around the wrapped method
+ :param args:
+ :param kwargs:
+ :return:
+ """
+ try:
+ with patch.object(PyAthenaOperations, 'execute_query', mock_execute_query), \
+ patch.object(BotoClientAthenaOperations, 'execute_query', mock_execute_query):
+ result = method(*args, **kwargs)
+ except Exception as err:
+ raise err
+ return result
+ return method_wrapper
diff --git a/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py b/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py
index 81ba090a..cb61c3b7 100644
--- a/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py
+++ b/tests/unittest/cloud_governance/common/ldap/test_ldap_search.py
@@ -12,4 +12,4 @@ def mock_search_s(cls, base, scope, filterstr=None, attrlist=None):
@patch.object(SimpleLDAPObject, 'search_s', mock_search_s)
def test_get_details():
ldap_object = LdapSearch(ldap_host_name='example.com')
- assert list(ldap_object.get_details(user_name='test').keys()) == ['displayName', 'manager', 'cn']
+ assert list(ldap_object._LdapSearch__get_details(user_name='test').keys()) == ['displayName', 'manager', 'cn']
diff --git a/tests/unittest/cloud_governance/common/utils/__init__.py b/tests/unittest/cloud_governance/common/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittest/cloud_governance/common/utils/test_utils.py b/tests/unittest/cloud_governance/common/utils/test_utils.py
new file mode 100644
index 00000000..769e6357
--- /dev/null
+++ b/tests/unittest/cloud_governance/common/utils/test_utils.py
@@ -0,0 +1,21 @@
+import boto3
+from moto import mock_ec2
+
+from cloud_governance.common.clouds.aws.utils.utils import Utils
+
+
+@mock_ec2
+def test_tag_aws_resources():
+ """
+    This method tests tagging aws resources
+ :return:
+ """
+ region_name = 'ap-south-1'
+ ec2_client = boto3.client('ec2', region_name=region_name)
+ common_utils = Utils(region=region_name)
+ resource_ids = []
+ for num in range(30):
+ instance_id = ec2_client.run_instances(MinCount=1, MaxCount=1)['Instances'][0]['InstanceId']
+ resource_ids.append(instance_id)
+ expected_res = common_utils.tag_aws_resources(ec2_client.create_tags, tags=[{'Key': 'User', 'Value': 'test'}], resource_ids=resource_ids)
+ assert expected_res == 2
diff --git a/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py b/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py
deleted file mode 100644
index 2c6cac8a..00000000
--- a/tests/unittest/cloud_resource_orchestration/aws/test_monitor_long_run.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import boto3
-from moto import mock_ec2
-
-from cloud_governance.cloud_resource_orchestration.aws.long_run.monitor_long_run import MonitorLongRun
-from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun
-from tests.unittest.cloud_resource_orchestration.mocks.mock_jira_operations import jira_mock
-
-AWS_DEFAULT_REGION = 'ap-south-1'
-
-
-@jira_mock
-@mock_ec2
-def test_monitor_long_run():
- """
- This method tests monitoring of long run
- """
- default_ami_id = 'ami-03cf127a'
- ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
- tags = [{'Key': 'JiraId', 'Value': 'test'}]
- instance_id = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
- TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])['Instances'][0]['InstanceId']
- tag_long_run = TagLongRun(region_name=AWS_DEFAULT_REGION)
- response = tag_long_run.run()
- result = False
- if response:
- monitor_long_run = MonitorLongRun(region_name=AWS_DEFAULT_REGION)
- response = monitor_long_run.monitor_instances()
- if response:
- value = list(response.values())[0]
- result = value == instance_id
- assert result
diff --git a/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py b/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py
deleted file mode 100644
index 83be17a2..00000000
--- a/tests/unittest/cloud_resource_orchestration/aws/test_tag_long_run.py
+++ /dev/null
@@ -1,27 +0,0 @@
-
-import boto3
-from moto import mock_ec2
-
-from cloud_governance.cloud_resource_orchestration.aws.long_run.tag_long_run import TagLongRun
-from tests.unittest.cloud_resource_orchestration.mocks.mock_jira_operations import jira_mock
-
-AWS_DEFAULT_REGION = 'ap-south-1'
-
-
-@jira_mock
-@mock_ec2
-def test_tag_long_run():
- """
- This method tests the tag long run
- """
- default_ami_id = 'ami-03cf127a'
- ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
- tags = [{'Key': 'JiraId', 'Value': 'test'}]
- instance_id = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
- TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])['Instances'][0]['InstanceId']
- tag_long_run = TagLongRun(region_name=AWS_DEFAULT_REGION)
- response = tag_long_run.run()
- if response:
- assert response['test'][0] == instance_id
- else:
- assert False
diff --git a/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py b/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py
deleted file mode 100644
index ae43dc4d..00000000
--- a/tests/unittest/cloud_resource_orchestration/common/test_ec2_monitor_operations.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import time
-
-import boto3
-from moto import mock_ec2
-
-from cloud_governance.cloud_resource_orchestration.common.ec2_monitor_operations import EC2MonitorOperations
-
-AWS_DEFAULT_REGION = 'ap-south-1'
-
-
-@mock_ec2
-def test_get_instance_run_hours():
- """"
- This method tests current instance running hours
- """
- default_ami_id = 'ami-03cf127a'
- ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)
- ec2_monitor_operations = EC2MonitorOperations(region_name=AWS_DEFAULT_REGION)
- tags = [{'Key': 'JiraId', 'Value': 'test'}]
- ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1,
- TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
- time.sleep(5)
- hours, _ = ec2_monitor_operations.get_instance_run_hours(instance=ec2_client.describe_instances()['Reservations'][0]['Instances'][0], jira_id='test')
- assert hours > 0
diff --git a/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py b/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py
deleted file mode 100644
index 979c74c7..00000000
--- a/tests/unittest/cloud_resource_orchestration/mocks/mock_jira_operations.py
+++ /dev/null
@@ -1,61 +0,0 @@
-
-from functools import wraps
-from unittest.mock import patch
-
-
-from cloud_governance.common.jira.jira_operations import JiraOperations
-
-
-def mock_get_issue(*args, **kwargs):
- """This method mock the get_issue from the jira"""
- if kwargs.get('jira_id'):
- return {'fields': {
- 'status': {'name': 'Refinement'},
- 'description': "First Name: Test\n"
- "Last Name: Mock\nEmail Address: mock@gmail.com\n"
- "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n"
- "Project: mock-test\nRegion: ap-south-1\nFull Summary: This is the test mock test\n"
- "Cloud Name: mock\nAccount Name: mock-account\nInstance Types: t2.micro: 5\n"
- "Cost Estimation:12.0\nDetails: This is the test machine \n"
- "ApprovedManager: mockapproval@gmail.com \n"
- }}
-
-
-def mock_get_jira_id_sub_tasks(*args, **kwargs):
- """This method mock get_jira_id_sub_tasks"""
- if kwargs.get('jira_id'):
- return ['subtask-1']
- return {}
-
-
-def mock_move_issue_state(*args, **kwargs):
- """this method mock mock_move_issue_state"""
- if kwargs.get('jira_id') and kwargs.get('state'):
- return True
- return False
-
-
-def jira_mock(method):
- """
- Mocking the ibm SoftLayer client methods
- @param method:
- @return:
- """
- @wraps(method)
- def method_wrapper(*args, **kwargs):
- """
- This is the wrapper method to wraps the method inside the function
- @param args:
- @param kwargs:
- @return:
- """
- result = ''
- try:
- with patch.object(JiraOperations, 'get_issue', mock_get_issue), \
- patch.object(JiraOperations, 'get_jira_id_sub_tasks', mock_get_jira_id_sub_tasks),\
- patch.object(JiraOperations, 'move_issue_state', mock_move_issue_state):
- result = method(*args, **kwargs)
- except Exception as err:
- pass
- return result
- return method_wrapper
diff --git a/tests_requirements.txt b/tests_requirements.txt
index 2e348f52..61379964 100644
--- a/tests_requirements.txt
+++ b/tests_requirements.txt
@@ -1,19 +1,18 @@
-boto3==1.26.1
+aiohttp==3.8.1
+azure-identity==1.12.0
+azure-mgmt-costmanagement==3.0.0
+azure-mgmt-billing==6.0.0
+azure-mgmt-subscription==3.1.1
+boto3==1.26.4
elasticsearch==7.11.0
elasticsearch-dsl==7.4.0
moto==2.3.2
+oauthlib~=3.1.1
pandas
-requests==2.27.1
-typeguard==2.13.3
+pytest
python-ldap==3.4.2
-SoftLayer==6.0.0
+requests==2.31.0
retry==0.9.2
-azure-identity==1.12.0
-azure-mgmt-costmanagement==3.0.0
-azure-mgmt-subscription==3.1.1
-azure-mgmt-billing==6.0.0
-
-# EC2 LongRun Required by Jira
-aiohttp==3.8.1
+typeguard==2.13.3
+SoftLayer==6.0.0
urllib3==1.26.7
-oauthlib~=3.1.1
\ No newline at end of file