Commit

add more features to gcpmmp
fix tests

fix goimports

add comment

lint

generate

fix
gzcharleszhang committed Dec 6, 2023
1 parent 0ea6fdd commit 776d542
Showing 12 changed files with 741 additions and 156 deletions.
55 changes: 45 additions & 10 deletions cloud/scope/managedmachinepool.go
@@ -22,6 +22,7 @@ import (
"strings"

"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-gcp/cloud"
"sigs.k8s.io/cluster-api-provider-gcp/util/location"

@@ -155,34 +156,68 @@ func (s *ManagedMachinePoolScope) NodePoolVersion() *string {
return s.MachinePool.Spec.Template.Spec.Version
}

// NodePoolResourceLabels returns the resource labels of the node pool.
func NodePoolResourceLabels(additionalLabels infrav1.Labels, clusterName string) infrav1.Labels {
resourceLabels := additionalLabels.DeepCopy()
if resourceLabels == nil {
resourceLabels = infrav1.Labels{}
}
resourceLabels[infrav1.ClusterTagKey(clusterName)] = string(infrav1.ResourceLifecycleOwned)
return resourceLabels
}

// ConvertToSdkNodePool converts a node pool to the format used by the GCP SDK.
-func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1exp.MachinePool, regional bool) *containerpb.NodePool {
+func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1exp.MachinePool, regional bool, clusterName string) *containerpb.NodePool {
replicas := *machinePool.Spec.Replicas
if regional {
replicas /= cloud.DefaultNumRegionsPerZone
}
nodePoolName := nodePool.Spec.NodePoolName
if len(nodePoolName) == 0 {
// Use the GCPManagedMachinePool CR name if nodePoolName is not specified
nodePoolName = nodePool.Name
}
// build node pool in GCP SDK format using the GCPManagedMachinePool spec
sdkNodePool := containerpb.NodePool{
Name: nodePoolName,
InitialNodeCount: replicas,
Config: &containerpb.NodeConfig{
-Labels: nodePool.Spec.KubernetesLabels,
-Taints: infrav1exp.ConvertToSdkTaint(nodePool.Spec.KubernetesTaints),
-Metadata: nodePool.Spec.AdditionalLabels,
+Labels: nodePool.Spec.KubernetesLabels,
+Taints: infrav1exp.ConvertToSdkTaint(nodePool.Spec.KubernetesTaints),
+ShieldedInstanceConfig: &containerpb.ShieldedInstanceConfig{
+EnableSecureBoot: pointer.BoolDeref(nodePool.Spec.NodeSecurity.EnableSecureBoot, false),
+EnableIntegrityMonitoring: pointer.BoolDeref(nodePool.Spec.NodeSecurity.EnableIntegrityMonitoring, false),
+},
+ResourceLabels: NodePoolResourceLabels(nodePool.Spec.AdditionalLabels, clusterName),
},
}
if nodePool.Spec.MachineType != nil {
sdkNodePool.Config.MachineType = *nodePool.Spec.MachineType
}
if nodePool.Spec.DiskSizeGb != nil {
sdkNodePool.Config.DiskSizeGb = *nodePool.Spec.DiskSizeGb
}
if nodePool.Spec.ImageType != nil {
sdkNodePool.Config.ImageType = *nodePool.Spec.ImageType
}
if nodePool.Spec.LocalSsdCount != nil {
sdkNodePool.Config.LocalSsdCount = *nodePool.Spec.LocalSsdCount
}
if nodePool.Spec.DiskType != nil {
sdkNodePool.Config.DiskType = string(*nodePool.Spec.DiskType)
}
if nodePool.Spec.Scaling != nil {
-sdkNodePool.Autoscaling = &containerpb.NodePoolAutoscaling{
-Enabled: true,
-MinNodeCount: *nodePool.Spec.Scaling.MinCount,
-MaxNodeCount: *nodePool.Spec.Scaling.MaxCount,
-}
+sdkNodePool.Autoscaling = infrav1exp.ConvertToSdkAutoscaling(nodePool.Spec.Scaling)
}
if nodePool.Spec.Management != nil {
sdkNodePool.Management = &containerpb.NodeManagement{
AutoRepair: nodePool.Spec.Management.AutoRepair,
AutoUpgrade: nodePool.Spec.Management.AutoUpgrade,
}
}
if nodePool.Spec.MaxPodsPerNode != nil {
sdkNodePool.MaxPodsConstraint = &containerpb.MaxPodsConstraint{
MaxPodsPerNode: *nodePool.Spec.MaxPodsPerNode,
}
}
if nodePool.Spec.InstanceType != nil {
@@ -234,10 +269,10 @@ func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool
}

// ConvertToSdkNodePools converts node pools to the format used by the GCP SDK.
-func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, regional bool) []*containerpb.NodePool {
+func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, regional bool, clusterName string) []*containerpb.NodePool {
res := []*containerpb.NodePool{}
for i := range nodePools {
-res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i], regional))
+res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i], regional, clusterName))
}
return res
}
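For context, a minimal standalone sketch of what the new NodePoolResourceLabels helper does (not part of the commit; import paths taken from the diff, cluster name hypothetical): it deep-copies the user-supplied labels and merges in the cluster-ownership label.

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
)

func main() {
	in := infrav1.Labels{"team": "platform"}

	// Merges the caller's labels with the "owned" lifecycle label keyed by
	// infrav1.ClusterTagKey("prod-cluster"); the input map is deep-copied,
	// so "in" itself is never mutated.
	out := scope.NodePoolResourceLabels(in, "prod-cluster")

	fmt.Println(len(in), len(out)) // 1 2
	fmt.Println(out[infrav1.ClusterTagKey("prod-cluster")] == string(infrav1.ResourceLifecycleOwned)) // true
}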
138 changes: 138 additions & 0 deletions cloud/scope/managedmachinepool_test.go
@@ -0,0 +1,138 @@
package scope

import (
"cloud.google.com/go/container/apiv1/containerpb"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-gcp/cloud"
"sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

var TestGCPMMP *v1beta1.GCPManagedMachinePool
var TestMP *clusterv1exp.MachinePool
var TestClusterName string

var _ = Describe("GCPManagedMachinePool Scope", func() {
BeforeEach(func() {
TestClusterName = "test-cluster"
gcpmmpName := "test-gcpmmp"
nodePoolName := "test-pool"
namespace := "capg-system"
replicas := int32(1)

TestGCPMMP = &v1beta1.GCPManagedMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: gcpmmpName,
Namespace: namespace,
},
Spec: v1beta1.GCPManagedMachinePoolSpec{
NodePoolName: nodePoolName,
},
}
TestMP = &clusterv1exp.MachinePool{
Spec: clusterv1exp.MachinePoolSpec{
Replicas: &replicas,
},
}
})

Context("Test NodePoolResourceLabels", func() {
It("should append cluster owned label", func() {
labels := infrav1.Labels{"test-key": "test-value"}

Expect(NodePoolResourceLabels(labels, TestClusterName)).To(Equal(infrav1.Labels{
"test-key": "test-value",
infrav1.ClusterTagKey(TestClusterName): string(infrav1.ResourceLifecycleOwned),
}))
})
})

Context("Test ConvertToSdkNodePool", func() {
It("should convert to SDK node pool with default values", func() {
sdkNodePool := ConvertToSdkNodePool(*TestGCPMMP, *TestMP, false, TestClusterName)

Expect(sdkNodePool).To(Equal(&containerpb.NodePool{
Name: TestGCPMMP.Spec.NodePoolName,
InitialNodeCount: *TestMP.Spec.Replicas,
Config: &containerpb.NodeConfig{
ResourceLabels: NodePoolResourceLabels(nil, TestClusterName),
ShieldedInstanceConfig: &containerpb.ShieldedInstanceConfig{},
},
}))
})

It("should convert to SDK node pool node count in a regional cluster", func() {
replicas := int32(6)
TestMP.Spec.Replicas = &replicas

sdkNodePool := ConvertToSdkNodePool(*TestGCPMMP, *TestMP, true, TestClusterName)

Expect(sdkNodePool).To(Equal(&containerpb.NodePool{
Name: TestGCPMMP.Spec.NodePoolName,
InitialNodeCount: replicas / cloud.DefaultNumRegionsPerZone,
Config: &containerpb.NodeConfig{
ResourceLabels: NodePoolResourceLabels(nil, TestClusterName),
ShieldedInstanceConfig: &containerpb.ShieldedInstanceConfig{},
},
}))
})

It("should convert to SDK node pool using GCPManagedMachinePool", func() {
machineType := "n1-standard-1"
diskSizeGb := int32(128)
imageType := "ubuntu_containerd"
localSsdCount := int32(2)
diskType := v1beta1.SSD
maxPodsPerNode := int64(20)
enableAutoscaling := false
scaling := v1beta1.NodePoolAutoScaling{
EnableAutoscaling: &enableAutoscaling,
}
labels := infrav1.Labels{"test-key": "test-value"}
taints := v1beta1.Taints{
{
Key: "test-key",
Value: "test-value",
Effect: "NoSchedule",
},
}
resourceLabels := infrav1.Labels{"test-key": "test-value"}

TestGCPMMP.Spec.MachineType = &machineType
TestGCPMMP.Spec.DiskSizeGb = &diskSizeGb
TestGCPMMP.Spec.ImageType = &imageType
TestGCPMMP.Spec.LocalSsdCount = &localSsdCount
TestGCPMMP.Spec.DiskType = &diskType
TestGCPMMP.Spec.Scaling = &scaling
TestGCPMMP.Spec.MaxPodsPerNode = &maxPodsPerNode
TestGCPMMP.Spec.KubernetesLabels = labels
TestGCPMMP.Spec.KubernetesTaints = taints
TestGCPMMP.Spec.AdditionalLabels = resourceLabels

sdkNodePool := ConvertToSdkNodePool(*TestGCPMMP, *TestMP, false, TestClusterName)

Expect(sdkNodePool).To(Equal(&containerpb.NodePool{
Name: TestGCPMMP.Spec.NodePoolName,
InitialNodeCount: *TestMP.Spec.Replicas,
Config: &containerpb.NodeConfig{
Labels: labels,
Taints: v1beta1.ConvertToSdkTaint(taints),
ResourceLabels: NodePoolResourceLabels(resourceLabels, TestClusterName),
MachineType: machineType,
DiskSizeGb: diskSizeGb,
ImageType: imageType,
LocalSsdCount: localSsdCount,
DiskType: string(diskType),
ShieldedInstanceConfig: &containerpb.ShieldedInstanceConfig{},
},
Autoscaling: v1beta1.ConvertToSdkAutoscaling(&scaling),
MaxPodsConstraint: &containerpb.MaxPodsConstraint{
MaxPodsPerNode: maxPodsPerNode,
},
}))
})
})
})
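The regional spec above reflects GKE semantics: InitialNodeCount is specified per zone, so ConvertToSdkNodePool divides the MachinePool's total replicas by cloud.DefaultNumRegionsPerZone. A minimal sketch of that arithmetic, assuming the constant is 3 (the value the 6-replica expectation implies):

package main

import "fmt"

// Assumed to mirror cloud.DefaultNumRegionsPerZone: the default number of
// zones a regional GKE cluster spreads its node pool across.
const defaultNumZonesPerRegion = 3

func main() {
	replicas := int32(6) // MachinePool.Spec.Replicas: total desired nodes
	regional := true
	if regional {
		// InitialNodeCount is per zone, so split the total across zones.
		replicas /= defaultNumZonesPerRegion
	}
	fmt.Println(replicas) // 2
}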
13 changes: 13 additions & 0 deletions cloud/scope/scope_suite_test.go
@@ -0,0 +1,13 @@
package scope_test

import (
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

func TestScope(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Scope Suite")
}
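With this suite file in place, RegisterFailHandler routes Gomega assertion failures into Ginkgo, and the specs in managedmachinepool_test.go run under the standard Go test runner, e.g. go test ./cloud/scope/... (standard Ginkgo v2 wiring; the exact invocation is assumed, not part of the commit).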
2 changes: 1 addition & 1 deletion cloud/services/container/clusters/reconcile.go
@@ -265,7 +265,7 @@ func (s *Service) createCluster(ctx context.Context, log *logr.Logger) error {
cluster.InitialClusterVersion = *s.scope.GCPManagedControlPlane.Spec.ControlPlaneVersion
}
if !s.scope.IsAutopilotCluster() {
-cluster.NodePools = scope.ConvertToSdkNodePools(nodePools, machinePools, isRegional)
+cluster.NodePools = scope.ConvertToSdkNodePools(nodePools, machinePools, isRegional, cluster.Name)
}

createClusterRequest := &containerpb.CreateClusterRequest{
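To illustrate the plumbing this hunk completes, a minimal sketch of the plural conversion createCluster now performs (not part of the commit; pool and cluster names hypothetical, import paths from the diff). Each resulting SDK node pool carries the cluster-ownership resource label:

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

func main() {
	replicas := int32(3)
	nodePools := []infrav1exp.GCPManagedMachinePool{
		{Spec: infrav1exp.GCPManagedMachinePoolSpec{NodePoolName: "pool-a"}},
	}
	machinePools := []clusterv1exp.MachinePool{
		{Spec: clusterv1exp.MachinePoolSpec{Replicas: &replicas}},
	}

	// Pass the cluster name through so each pool is labeled as cluster-owned.
	pools := scope.ConvertToSdkNodePools(nodePools, machinePools, false, "my-cluster")
	fmt.Println(pools[0].Name, pools[0].InitialNodeCount) // pool-a 3
	fmt.Println(pools[0].Config.ResourceLabels)           // includes the owned label
}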