Skip to content

Commit

Permalink
Fix: Update kubernetes provider name and tag. (#64)
Browse files Browse the repository at this point in the history
* fix: added label in provider data of k8s

* feat: additional tags for eks cluster only

* fix: ran terraform fmt command

* fix: lists wherever join function used

* fix: fix all lints found

---------

Co-authored-by: Anmol Nagpal <anmol@clouddrove.com>
  • Loading branch information
nileshgadgi and anmolnagpal committed May 7, 2024
1 parent e134bdf commit 5268f7c
Show file tree
Hide file tree
Showing 13 changed files with 65 additions and 62 deletions.
14 changes: 7 additions & 7 deletions aws_auth.tf
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ locals {
# Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
map_worker_roles = [
{
rolearn : join("", aws_iam_role.node_groups.*.arn)
rolearn : aws_iam_role.node_groups.0.arn

Check warning on line 41 in aws_auth.tf

View workflow job for this annotation

GitHub Actions / tf-lint / tflint

List items should be accessed using square brackets
username : "system:node:{{EC2PrivateDNSName}}"
groups : [
"system:bootstrappers",
Expand All @@ -53,7 +53,7 @@ data "template_file" "kubeconfig" {
template = file("${path.module}/kubeconfig.tpl")

vars = {
server = join("", aws_eks_cluster.default.*.endpoint)
server = aws_eks_cluster.default[0].endpoint
certificate_authority_data = local.certificate_authority_data
cluster_name = module.labels.id
}
Expand All @@ -74,7 +74,7 @@ resource "null_resource" "wait_for_cluster" {

data "aws_eks_cluster" "eks" {
count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0
name = join("", aws_eks_cluster.default.*.id)
name = module.labels.id
}

# Get an authentication token to communicate with the EKS cluster.
Expand All @@ -84,13 +84,13 @@ data "aws_eks_cluster" "eks" {
# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html
data "aws_eks_cluster_auth" "eks" {
count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0
name = join("", aws_eks_cluster.default.*.id)
name = module.labels.id
}

provider "kubernetes" {
token = join("", data.aws_eks_cluster_auth.eks.*.token)
host = join("", data.aws_eks_cluster.eks.*.endpoint)
cluster_ca_certificate = base64decode(join("", data.aws_eks_cluster.eks.*.certificate_authority.0.data))
token = data.aws_eks_cluster_auth.eks[0].token
host = data.aws_eks_cluster.eks[0].endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks[0].certificate_authority.0.data)

Check warning on line 93 in aws_auth.tf

View workflow job for this annotation

GitHub Actions / tf-lint / tflint

List items should be accessed using square brackets
}

resource "kubernetes_config_map" "aws_auth_ignore_changes" {
Expand Down
4 changes: 2 additions & 2 deletions aws_node_groups.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ module "eks_managed_node_group" {

enabled = try(each.value.enabled, true)

cluster_name = join("", aws_eks_cluster.default.*.name)
cluster_name = aws_eks_cluster.default[0].name
cluster_version = var.kubernetes_version
vpc_security_group_ids = compact(
concat(
Expand Down Expand Up @@ -72,7 +72,7 @@ module "eks_managed_node_group" {
placement = try(each.value.placement, var.managed_node_group_defaults.placement, null)

# IAM role
iam_role_arn = join("", aws_iam_role.node_groups.*.arn)
iam_role_arn = aws_iam_role.node_groups[0].arn

tags = merge(var.tags, try(each.value.tags, var.managed_node_group_defaults.tags, {}))
}
Expand Down
2 changes: 1 addition & 1 deletion fargate_profile.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ module "fargate" {
label_order = var.label_order
enabled = var.enabled
fargate_enabled = var.fargate_enabled
cluster_name = join("", aws_eks_cluster.default.*.name)
cluster_name = aws_eks_cluster.default[0].name
fargate_profiles = var.fargate_profiles
subnet_ids = var.subnet_ids

Expand Down
39 changes: 19 additions & 20 deletions iam.tf
Original file line number Diff line number Diff line change
Expand Up @@ -17,22 +17,22 @@ resource "aws_iam_role" "default" {
count = var.enabled ? 1 : 0

name = module.labels.id
assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
assume_role_policy = data.aws_iam_policy_document.assume_role[0].json
permissions_boundary = var.permissions_boundary

tags = module.labels.tags
}

resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", data.aws_partition.current.partition)
role = aws_iam_role.default[0].name
}

resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", join("", data.aws_partition.current.*.partition))
role = join("", aws_iam_role.default.*.name)
policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", data.aws_partition.current.partition)
role = aws_iam_role.default[0].name
}

data "aws_iam_policy_document" "service_role" {
Expand All @@ -51,11 +51,10 @@ data "aws_iam_policy_document" "service_role" {
}
}


resource "aws_iam_role_policy" "service_role" {
count = var.enabled ? 1 : 0
role = join("", aws_iam_role.default.*.name)
policy = join("", data.aws_iam_policy_document.service_role.*.json)
role = aws_iam_role.default[0].name
policy = data.aws_iam_policy_document.service_role[0].json

name = module.labels.id

Expand All @@ -69,7 +68,7 @@ resource "aws_iam_role_policy" "service_role" {
resource "aws_iam_role" "node_groups" {
count = var.enabled ? 1 : 0
name = "${module.labels.id}-node_group"
assume_role_policy = join("", data.aws_iam_policy_document.node_group.*.json)
assume_role_policy = data.aws_iam_policy_document.node_group[0].json
tags = module.labels.tags
}

Expand All @@ -78,53 +77,53 @@ resource "aws_iam_role" "node_groups" {
resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" {
count = var.enabled ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = join("", aws_iam_role.node_groups.*.name)
role = aws_iam_role.node_groups[0].name
}

resource "aws_iam_role_policy_attachment" "additional" {
for_each = { for k, v in var.iam_role_additional_policies : k => v if var.enabled }

policy_arn = each.value
role = join("", aws_iam_role.node_groups.*.name)
role = aws_iam_role.node_groups[0].name
}

#Module : IAM ROLE POLICY ATTACHMENT EC2 CONTAINER REGISTRY READ ONLY
#Description : Attaches a Managed IAM Policy to an IAM role.
resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" {
count = var.enabled ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = join("", aws_iam_role.node_groups.*.name)
role = aws_iam_role.node_groups[0].name
}

resource "aws_iam_policy" "amazon_eks_node_group_autoscaler_policy" {
count = var.enabled ? 1 : 0
name = format("%s-node-group-policy", module.labels.id)
policy = join("", data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy.*.json)
policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json
}

resource "aws_iam_role_policy_attachment" "amazon_eks_node_group_autoscaler_policy" {
count = var.enabled ? 1 : 0
policy_arn = join("", aws_iam_policy.amazon_eks_node_group_autoscaler_policy.*.arn)
role = join("", aws_iam_role.node_groups.*.name)
policy_arn = aws_iam_policy.amazon_eks_node_group_autoscaler_policy[0].arn
role = aws_iam_role.node_groups[0].name
}

resource "aws_iam_policy" "amazon_eks_worker_node_autoscaler_policy" {
count = var.enabled ? 1 : 0
name = "${module.labels.id}-autoscaler"
path = "/"
policy = join("", data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy.*.json)
policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json
}

resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_autoscaler_policy" {
count = var.enabled ? 1 : 0
policy_arn = join("", aws_iam_policy.amazon_eks_worker_node_autoscaler_policy.*.arn)
role = join("", aws_iam_role.node_groups.*.name)
policy_arn = aws_iam_policy.amazon_eks_worker_node_autoscaler_policy[0].arn
role = aws_iam_role.node_groups[0].name
}

resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" {
count = var.enabled ? 1 : 0
policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKSWorkerNodePolicy")
role = join("", aws_iam_role.node_groups.*.name)
role = aws_iam_role.node_groups[0].name
}

data "aws_iam_policy_document" "node_group" {
Expand Down Expand Up @@ -167,5 +166,5 @@ data "aws_iam_policy_document" "amazon_eks_node_group_autoscaler_policy" {
resource "aws_iam_instance_profile" "default" {
count = var.enabled ? 1 : 0
name = format("%s-instance-profile", module.labels.id)
role = join("", aws_iam_role.node_groups.*.name)
role = aws_iam_role.node_groups[0].name
}
2 changes: 1 addition & 1 deletion kms.tf
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ data "aws_iam_policy_document" "cloudwatch" {
identifiers = [
format(
"arn:%s:iam::%s:root",
join("", data.aws_partition.current.*.partition),
data.aws_partition.current.partition,
data.aws_caller_identity.current.account_id
)
]
Expand Down
7 changes: 2 additions & 5 deletions locals.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,9 @@ locals {
# Encryption
cluster_encryption_config = {
resources = var.cluster_encryption_config_resources
provider_key_arn = var.enabled ? join("", aws_kms_key.cluster.*.arn) : null
provider_key_arn = var.enabled ? aws_kms_key.cluster[0].arn : null
}
aws_policy_prefix = format("arn:%s:iam::aws:policy", join("", data.aws_partition.current.*.partition))
aws_policy_prefix = format("arn:%s:iam::aws:policy", data.aws_partition.current.partition)
create_outposts_local_cluster = length(var.outpost_config) > 0

}



20 changes: 11 additions & 9 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,19 +21,17 @@ resource "aws_cloudwatch_log_group" "default" {
name = "/aws/eks/${module.labels.id}/cluster"
retention_in_days = var.cluster_log_retention_period
tags = module.labels.tags
kms_key_id = join("", aws_kms_key.cloudwatch_log.*.arn)
kms_key_id = aws_kms_key.cloudwatch_log[0].arn
}

#tfsec:ignore:aws-eks-no-public-cluster-access ## To provide eks endpoint public access from local network
#tfsec:ignore:aws-eks-no-public-cluster-access-to-cidr ## To provide eks endpoint public access from local network
resource "aws_eks_cluster" "default" {
count = var.enabled ? 1 : 0
name = module.labels.id
role_arn = join("", aws_iam_role.default.*.arn)
role_arn = aws_iam_role.default[0].arn
version = var.kubernetes_version
enabled_cluster_log_types = var.enabled_cluster_log_types
tags = module.labels.tags


vpc_config {
subnet_ids = var.subnet_ids
Expand Down Expand Up @@ -79,32 +77,36 @@ resource "aws_eks_cluster" "default" {
}
}

tags = merge(
module.labels.tags,
var.eks_tags
)

depends_on = [
aws_iam_role_policy_attachment.amazon_eks_cluster_policy,
aws_iam_role_policy_attachment.amazon_eks_service_policy,
aws_cloudwatch_log_group.default,

]
}

data "tls_certificate" "cluster" {
count = var.enabled && var.oidc_provider_enabled ? 1 : 0
url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer)
url = aws_eks_cluster.default[0].identity.0.oidc.0.issuer

Check warning on line 94 in main.tf

View workflow job for this annotation

GitHub Actions / tf-lint / tflint

List items should be accessed using square brackets
}

resource "aws_iam_openid_connect_provider" "default" {
count = var.enabled && var.oidc_provider_enabled ? 1 : 0
url = join("", aws_eks_cluster.default.*.identity.0.oidc.0.issuer)
url = aws_eks_cluster.default[0].identity.0.oidc.0.issuer

client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
thumbprint_list = [join("", data.tls_certificate.cluster.*.certificates.0.sha1_fingerprint)]
thumbprint_list = [data.tls_certificate.cluster[0].certificates.0.sha1_fingerprint]
tags = module.labels.tags
}

resource "aws_eks_addon" "cluster" {
for_each = var.enabled ? { for addon in var.addons : addon.addon_name => addon } : {}

cluster_name = join("", aws_eks_cluster.default.*.name)
cluster_name = aws_eks_cluster.default[0].name
addon_name = each.key
addon_version = lookup(each.value, "addon_version", null)
resolve_conflicts_on_create = lookup(each.value, "resolve_conflicts", null)
Expand Down
4 changes: 2 additions & 2 deletions node_group/fargate_profile/fargate.tf
Original file line number Diff line number Diff line change
Expand Up @@ -28,15 +28,15 @@ resource "aws_iam_role" "fargate_role" {
count = var.enabled && var.fargate_enabled ? 1 : 0

name = format("%s-fargate-role", module.labels.id)
assume_role_policy = join("", data.aws_iam_policy_document.aws_eks_fargate_policy.*.json)
assume_role_policy = data.aws_iam_policy_document.aws_eks_fargate_policy[0].json
tags = module.labels.tags
}

resource "aws_iam_role_policy_attachment" "amazon_eks_fargate_pod_execution_role_policy" {
count = var.enabled && var.fargate_enabled ? 1 : 0

policy_arn = "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
role = join("", aws_iam_role.fargate_role.*.name)
role = aws_iam_role.fargate_role[0].name
}

#Module : EKS Fargate
Expand Down
14 changes: 7 additions & 7 deletions node_group/self_managed/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,10 @@ resource "aws_launch_template" "this" {
name = module.labels.id

ebs_optimized = var.ebs_optimized
image_id = join("", data.aws_ami.eks_default.*.image_id)
image_id = data.aws_ami.eks_default[0].image_id
instance_type = var.instance_type
key_name = var.key_name
user_data = base64encode(join("", data.template_file.userdata.*.rendered))
user_data = base64encode(data.template_file.userdata[0].rendered)
disable_api_termination = var.disable_api_termination
instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
kernel_id = var.kernel_id
Expand Down Expand Up @@ -253,8 +253,8 @@ resource "aws_autoscaling_group" "this" {
for_each = var.use_mixed_instances_policy ? [] : [1]

content {
name = join("", aws_launch_template.this.*.name)
version = join("", aws_launch_template.this.*.latest_version)
name = aws_launch_template.this[0].name
version = aws_launch_template.this[0].latest_version
}
}

Expand Down Expand Up @@ -333,8 +333,8 @@ resource "aws_autoscaling_group" "this" {

launch_template {
launch_template_specification {
launch_template_name = join("", aws_launch_template.this.*.name)
version = join("", aws_launch_template.this.*.latest_version)
launch_template_name = aws_launch_template.this[0].name
version = aws_launch_template.this[0].latest_version
}

dynamic "override" {
Expand Down Expand Up @@ -392,7 +392,7 @@ resource "aws_autoscaling_schedule" "this" {
for_each = var.enabled && var.create_schedule ? var.schedules : {}

scheduled_action_name = each.key
autoscaling_group_name = join("", aws_autoscaling_group.this.*.name)
autoscaling_group_name = aws_autoscaling_group.this[0].name

min_size = lookup(each.value, "min_size", null)
max_size = lookup(each.value, "max_size", null)
Expand Down
2 changes: 1 addition & 1 deletion node_group/self_managed/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ variable "ebs_optimized" {

variable "instance_type" {
type = string
default = ""
default = "t3.medium"
description = "The type of the instance to launch"
}

Expand Down
10 changes: 5 additions & 5 deletions security_groups.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ resource "aws_security_group_rule" "node_group" {
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = join("", aws_security_group.node_group.*.id)
security_group_id = aws_security_group.node_group[0].id
type = "egress"
}

Expand All @@ -34,8 +34,8 @@ resource "aws_security_group_rule" "ingress_self" {
from_port = 0
to_port = 65535
protocol = "-1"
security_group_id = join("", aws_security_group.node_group.*.id)
source_security_group_id = join("", aws_security_group.node_group.*.id)
security_group_id = aws_security_group.node_group[0].id
source_security_group_id = aws_security_group.node_group[0].id
type = "ingress"
}

Expand All @@ -49,7 +49,7 @@ resource "aws_security_group_rule" "ingress_security_groups_node_group" {
to_port = 65535
protocol = "-1"
source_security_group_id = element(var.allowed_security_groups, count.index)
security_group_id = join("", aws_security_group.node_group.*.id)
security_group_id = aws_security_group.node_group[0].id
type = "ingress"
}

Expand All @@ -63,6 +63,6 @@ resource "aws_security_group_rule" "ingress_cidr_blocks_node_group" {
to_port = 0
protocol = "-1"
cidr_blocks = var.allowed_cidr_blocks
security_group_id = join("", aws_security_group.node_group.*.id)
security_group_id = aws_security_group.node_group[0].id
type = "ingress"
}
Loading

0 comments on commit 5268f7c

Please sign in to comment.