From 7211b96d068246769dddfcb6f9fdc8547c122551 Mon Sep 17 00:00:00 2001
From: Richard Case
Date: Thu, 23 Feb 2023 16:36:51 +0000
Subject: [PATCH] feat: changes for gke e2e testing

Adds GKE e2e tests. These tests will need to be expanded over time.

As a result of adding these tests, various other changes were made, including:
- Added docs about the permissions required
- Removed initial node count of the GCPManagedMachinePool
- Added validation to error if replicas isn't a multiple of 3 for a regional cluster (which all gke clusters are initially)

Signed-off-by: Richard Case
---
 cloud/defaults.go | 7 +-
 cloud/providerid/doc.go | 18 ++
 cloud/providerid/providerid.go | 89 ++++++++
 cloud/providerid/providerid_test.go | 116 ++++++++++
 cloud/scope/machine.go | 5 +-
 cloud/scope/managedcontrolplane.go | 5 +
 cloud/scope/managedmachinepool.go | 13 +-
 cloud/services/container/clusters/errors.go | 41 ++++
 .../services/container/clusters/kubeconfig.go | 30 ++-
 .../services/container/clusters/reconcile.go | 91 +++++---
 .../services/container/nodepools/reconcile.go | 50 ++++-
 cloud/services/shared/doc.go | 18 ++
 cloud/services/shared/machinepool.go | 66 ++++++
 ...ster.x-k8s.io_gcpmanagedcontrolplanes.yaml | 31 ++-
 ...uster.x-k8s.io_gcpmanagedmachinepools.yaml | 8 -
 config/manager/manager.yaml | 1 +
 config/rbac/role.yaml | 4 +
 docs/book/src/topics/gke/enabling.md | 2 +
 docs/book/src/topics/prerequisites.md | 6 +-
 .../v1beta1/gcpmanagedcontrolplane_types.go | 15 +-
 .../v1beta1/gcpmanagedcontrolplane_webhook.go | 4 +
 .../v1beta1/gcpmanagedmachinepool_types.go | 3 -
 .../v1beta1/gcpmanagedmachinepool_webhook.go | 28 ---
 .../gcpmanagedcluster_controller.go | 31 ++-
 .../gcpmanagedcontrolplane_controller.go | 36 ++-
 .../gcpmanagedmachinepool_controller.go | 34 ++-
 templates/cluster-template-gke-autopilot.yaml | 36 +++
 templates/cluster-template-gke.yaml | 3 +-
 test/e2e/config/gcp-ci.yaml | 8 +
 .../cluster-template-ci-gke-autopilot.yaml | 37 ++++
 .../cluster-template-ci-gke.yaml | 63 ++++++
 test/e2e/e2e_gke_test.go | 162 ++++++++++++++
 test/e2e/gke.go | 208 ++++++++++++++++++
 test/e2e/suite_test.go | 2 +
 34 files changed, 1118 insertions(+), 153 deletions(-)
 create mode 100644 cloud/providerid/doc.go
 create mode 100644 cloud/providerid/providerid.go
 create mode 100644 cloud/providerid/providerid_test.go
 create mode 100644 cloud/services/container/clusters/errors.go
 create mode 100644 cloud/services/shared/doc.go
 create mode 100644 cloud/services/shared/machinepool.go
 create mode 100644 templates/cluster-template-gke-autopilot.yaml
 create mode 100644 test/e2e/data/infrastructure-gcp/cluster-template-ci-gke-autopilot.yaml
 create mode 100644 test/e2e/data/infrastructure-gcp/cluster-template-ci-gke.yaml
 create mode 100644 test/e2e/e2e_gke_test.go
 create mode 100644 test/e2e/gke.go

diff --git a/cloud/defaults.go b/cloud/defaults.go
index 1b1eea703..1d47038d0 100644
--- a/cloud/defaults.go
+++ b/cloud/defaults.go
@@ -1,12 +1,9 @@ /* Copyright 2021 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,6 +14,6 @@ limitations under the License. package cloud const ( - // ProviderIDPrefix is the gce provider id prefix.
- ProviderIDPrefix = "gce://" + // DefaultNumRegionsPerZone is the default number of zones per region. + DefaultNumRegionsPerZone = 3 ) diff --git a/cloud/providerid/doc.go b/cloud/providerid/doc.go new file mode 100644 index 000000000..346c0fafc --- /dev/null +++ b/cloud/providerid/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package providerid implements functionality for creating kubernetes provider ids for nodes. +package providerid diff --git a/cloud/providerid/providerid.go b/cloud/providerid/providerid.go new file mode 100644 index 000000000..21f59042f --- /dev/null +++ b/cloud/providerid/providerid.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package providerid + +import ( + "errors" + "fmt" + "path" + + "sigs.k8s.io/cluster-api-provider-gcp/util/resourceurl" +) + +const ( + // Prefix is the gce provider id prefix. + Prefix = "gce://" +) + +// ProviderID represents the id for a GCP cluster. +type ProviderID interface { + Project() string + Location() string + Name() string + fmt.Stringer +} + +// NewFromResourceURL creates a provider from a GCP resource url. +func NewFromResourceURL(url string) (ProviderID, error) { + resourceURL, err := resourceurl.Parse(url) + if err != nil { + return nil, fmt.Errorf("parsing resource url %s: %w", url, err) + } + + return New(resourceURL.Project, resourceURL.Location, resourceURL.Name) +} + +// New creates a new provider id. 
+func New(project, location, name string) (ProviderID, error) { + if project == "" { + return nil, errors.New("project required for provider id") + } + if location == "" { + return nil, errors.New("location required for provider id") + } + if name == "" { + return nil, errors.New("name required for provider id") + } + + return &providerID{ + project: project, + location: location, + name: name, + }, nil +} + +type providerID struct { + project string + location string + name string +} + +func (p *providerID) Project() string { + return p.project +} + +func (p *providerID) Location() string { + return p.location +} + +func (p *providerID) Name() string { + return p.name +} + +func (p *providerID) String() string { + return Prefix + path.Join(p.project, p.location, p.name) +} diff --git a/cloud/providerid/providerid_test.go b/cloud/providerid/providerid_test.go new file mode 100644 index 000000000..2de808b9d --- /dev/null +++ b/cloud/providerid/providerid_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package providerid_test + +import ( + "testing" + + . "github.com/onsi/gomega" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid" +) + +func TestProviderID_New(t *testing.T) { + RegisterTestingT(t) + + testCases := []struct { + testname string + project string + location string + name string + expectedProviderID string + expectError bool + }{ + { + testname: "no project, should fail", + project: "", + location: "eu-west4", + name: "vm1", + expectError: true, + }, + { + testname: "no location, should fail", + project: "proj1", + location: "", + name: "vm1", + expectError: true, + }, + { + testname: "no name, should fail", + project: "proj1", + location: "eu-west4", + name: "", + expectError: true, + }, + { + testname: "with all details, should pass", + project: "proj1", + location: "eu-west4", + name: "vm1", + expectError: false, + expectedProviderID: "gce://proj1/eu-west4/vm1", + }, + } + + for _, tc := range testCases { + t.Run(tc.testname, func(t *testing.T) { + providerID, err := providerid.New(tc.project, tc.location, tc.name) + + if tc.expectError { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).NotTo(HaveOccurred()) + Expect(providerID.String()).To(Equal(tc.expectedProviderID)) + } + }) + } +} + +func TestProviderID_NewFromResourceURL(t *testing.T) { + RegisterTestingT(t) + + testCases := []struct { + testname string + resourceURL string + expectedProviderID string + expectError bool + }{ + { + testname: "invalid url, should fail", + resourceURL: "hvfnhdkdk", + expectError: true, + }, + { + testname: "valid instance url, should pass", + resourceURL: "https://www.googleapis.com/compute/v1/projects/myproject/zones/europe-west2-a/instances/gke-capg-dskczmdculd-capg-e2e-ebs0oy--014f89ba-sx2p", + expectError: false, + expectedProviderID: "gce://myproject/europe-west2-a/gke-capg-dskczmdculd-capg-e2e-ebs0oy--014f89ba-sx2p", + }, + } + + for _, tc := range testCases { + t.Run(tc.testname, func(t *testing.T) { + providerID, 
err := providerid.NewFromResourceURL(tc.resourceURL) + + if tc.expectError { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).NotTo(HaveOccurred()) + Expect(providerID.String()).To(Equal(tc.expectedProviderID)) + } + }) + } +} diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index 209e52c06..80410fbe8 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -34,6 +34,7 @@ import ( "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" capierrors "sigs.k8s.io/cluster-api/errors" @@ -169,8 +170,8 @@ func (m *MachineScope) GetProviderID() string { // SetProviderID sets the GCPMachine providerID in spec. func (m *MachineScope) SetProviderID() { - providerID := cloud.ProviderIDPrefix + path.Join(m.ClusterGetter.Project(), m.Zone(), m.Name()) - m.GCPMachine.Spec.ProviderID = pointer.StringPtr(providerID) + providerID, _ := providerid.New(m.ClusterGetter.Project(), m.Zone(), m.Name()) + m.GCPMachine.Spec.ProviderID = pointer.StringPtr(providerID.String()) } // GetInstanceStatus returns the GCPMachine instance status. diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go index 0a7bf0b52..cfaec26b1 100644 --- a/cloud/scope/managedcontrolplane.go +++ b/cloud/scope/managedcontrolplane.go @@ -215,3 +215,8 @@ func (s *ManagedControlPlaneScope) SetEndpoint(host string) { Port: APIServerPort, } } + +// IsAutopilotCluster returns true if this is an autopilot cluster. +func (s *ManagedControlPlaneScope) IsAutopilotCluster() bool { + return s.GCPManagedControlPlane.Spec.EnableAutopilot +} diff --git a/cloud/scope/managedmachinepool.go b/cloud/scope/managedmachinepool.go index 716a533e0..7f0dea927 100644 --- a/cloud/scope/managedmachinepool.go +++ b/cloud/scope/managedmachinepool.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/util/location" "sigs.k8s.io/cluster-api/util/conditions" @@ -153,14 +154,18 @@ func (s *ManagedMachinePoolScope) NodePoolVersion() *string { } // ConvertToSdkNodePool converts a node pool to format that is used by GCP SDK. -func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1exp.MachinePool) *containerpb.NodePool { +func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1exp.MachinePool, regional bool) *containerpb.NodePool { + replicas := *machinePool.Spec.Replicas + if regional { + replicas /= cloud.DefaultNumRegionsPerZone + } nodePoolName := nodePool.Spec.NodePoolName if len(nodePoolName) == 0 { nodePoolName = nodePool.Name } sdkNodePool := containerpb.NodePool{ Name: nodePoolName, - InitialNodeCount: nodePool.Spec.InitialNodeCount, + InitialNodeCount: replicas, Config: &containerpb.NodeConfig{ Labels: nodePool.Spec.KubernetesLabels, Taints: infrav1exp.ConvertToSdkTaint(nodePool.Spec.KubernetesTaints), @@ -181,10 +186,10 @@ func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool } // ConvertToSdkNodePools converts node pools to format that is used by GCP SDK. 
-func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool) []*containerpb.NodePool { +func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, regional bool) []*containerpb.NodePool { res := []*containerpb.NodePool{} for i := range nodePools { - res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i])) + res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i], regional)) } return res } diff --git a/cloud/services/container/clusters/errors.go b/cloud/services/container/clusters/errors.go new file mode 100644 index 000000000..b3bbf3fa5 --- /dev/null +++ b/cloud/services/container/clusters/errors.go @@ -0,0 +1,41 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusters + +import ( + "fmt" + + "github.com/pkg/errors" +) + +var ( + // ErrAutopilotClusterMachinePoolsNotAllowed is used when there are machine pools specified for an autopilot enabled cluster. + ErrAutopilotClusterMachinePoolsNotAllowed = errors.New("cannot use machine pools with an autopilot enabled cluster") +) + +// NewErrUnexpectedClusterStatus creates a new error for an unexpected cluster status. +func NewErrUnexpectedClusterStatus(status string) error { + return &errUnexpectedClusterStatus{status} +} + +type errUnexpectedClusterStatus struct { + status string +} + +func (e *errUnexpectedClusterStatus) Error() string { + return fmt.Sprintf("unexpected error status: %s", e.status) +} diff --git a/cloud/services/container/clusters/kubeconfig.go b/cloud/services/container/clusters/kubeconfig.go index 255857430..7c3554c32 100644 --- a/cloud/services/container/clusters/kubeconfig.go +++ b/cloud/services/container/clusters/kubeconfig.go @@ -23,6 +23,7 @@ import ( "cloud.google.com/go/container/apiv1/containerpb" "cloud.google.com/go/iam/credentials/apiv1/credentialspb" + "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -40,7 +41,8 @@ const ( GkeScope = "https://www.googleapis.com/auth/cloud-platform" ) -func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *containerpb.Cluster) error { +func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *containerpb.Cluster, log *logr.Logger) error { + log.Info("Reconciling kubeconfig") clusterRef := types.NamespacedName{ Name: s.scope.Cluster.Name, Namespace: s.scope.Cluster.Namespace, @@ -49,13 +51,16 @@ func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *containerpb. 
configSecret, err := secret.GetFromNamespacedName(ctx, s.scope.Client(), clusterRef, secret.Kubeconfig) if err != nil { if !apierrors.IsNotFound(err) { - return errors.Wrap(err, "failed to get kubeconfig secret") + log.Error(err, "getting kubeconfig secret", "name", clusterRef) + return fmt.Errorf("getting kubeconfig secret %s: %w", clusterRef, err) } + log.Info("kubeconfig secret not found, creating") if createErr := s.createCAPIKubeconfigSecret( ctx, cluster, &clusterRef, + log, ); createErr != nil { return fmt.Errorf("creating kubeconfig secret: %w", createErr) } @@ -66,7 +71,8 @@ func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *containerpb. return nil } -func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *containerpb.Cluster) error { +func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *containerpb.Cluster, log *logr.Logger) error { + log.Info("Reconciling additional kubeconfig") clusterRef := types.NamespacedName{ Name: s.scope.Cluster.Name + "-user", Namespace: s.scope.Cluster.Namespace, @@ -76,7 +82,7 @@ func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *c _, err := secret.GetFromNamespacedName(ctx, s.scope.Client(), clusterRef, secret.Kubeconfig) if err != nil { if !apierrors.IsNotFound(err) { - return errors.Wrap(err, "failed to get kubeconfig (user) secret") + return fmt.Errorf("getting kubeconfig (user) secret %s: %w", clusterRef, err) } createErr := s.createUserKubeconfigSecret( @@ -85,7 +91,7 @@ func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *c &clusterRef, ) if createErr != nil { - return err + return fmt.Errorf("creating additional kubeconfig secret: %w", err) } } @@ -119,29 +125,31 @@ func (s *Service) createUserKubeconfigSecret(ctx context.Context, cluster *conta out, err := clientcmd.Write(*cfg) if err != nil { - return errors.Wrap(err, "failed to serialize config to yaml") + return fmt.Errorf("serialize kubeconfig to yaml: %w", err) } kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef) if err := s.scope.Client().Create(ctx, kubeconfigSecret); err != nil { - return errors.Wrap(err, "failed to create kubeconfig secret") + return fmt.Errorf("creating secret: %w", err) } return nil } -func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *containerpb.Cluster, clusterRef *types.NamespacedName) error { +func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *containerpb.Cluster, clusterRef *types.NamespacedName, log *logr.Logger) error { controllerOwnerRef := *metav1.NewControllerRef(s.scope.GCPManagedControlPlane, infrav1exp.GroupVersion.WithKind("GCPManagedControlPlane")) contextName := s.getKubeConfigContextName(false) cfg, err := s.createBaseKubeConfig(contextName, cluster) if err != nil { + log.Error(err, "failed creating base config") return fmt.Errorf("creating base kubeconfig: %w", err) } token, err := s.generateToken(ctx) if err != nil { + log.Error(err, "failed generating token") return err } cfg.AuthInfos = map[string]*api.AuthInfo{ @@ -152,12 +160,14 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *conta out, err := clientcmd.Write(*cfg) if err != nil { - return errors.Wrap(err, "failed to serialize config to yaml") + log.Error(err, "failed serializing kubeconfig to yaml") + return fmt.Errorf("serialize kubeconfig to yaml: %w", err) } kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef) if 
err := s.scope.Client().Create(ctx, kubeconfigSecret); err != nil { - return errors.Wrap(err, "failed to create kubeconfig secret") + log.Error(err, "failed creating secret") + return fmt.Errorf("creating secret: %w", err) } return nil diff --git a/cloud/services/container/clusters/reconcile.go b/cloud/services/container/clusters/reconcile.go index bffc7de17..1e6c3e999 100644 --- a/cloud/services/container/clusters/reconcile.go +++ b/cloud/services/container/clusters/reconcile.go @@ -21,8 +21,10 @@ import ( "fmt" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared" "cloud.google.com/go/container/apiv1/containerpb" + "github.com/go-logr/logr" "github.com/googleapis/gax-go/v2/apierror" "github.com/pkg/errors" "google.golang.org/grpc/codes" @@ -39,7 +41,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { log := log.FromContext(ctx).WithValues("service", "container.clusters") log.Info("Reconciling cluster resources") - cluster, err := s.describeCluster(ctx) + cluster, err := s.describeCluster(ctx, &log) if err != nil { s.scope.GCPManagedControlPlane.Status.Ready = false conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) @@ -56,27 +58,41 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } - if len(nodePools) == 0 { - log.Info("At least 1 node pool is required to create GKE cluster") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil + if s.scope.IsAutopilotCluster() { + if len(nodePools) > 0 { + log.Error(ErrAutopilotClusterMachinePoolsNotAllowed, fmt.Sprintf("%d machine pools defined", len(nodePools))) + conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + return ctrl.Result{}, ErrAutopilotClusterMachinePoolsNotAllowed + } + } else { + if len(nodePools) == 0 { + log.Info("At least 1 node pool is required to create GKE cluster with autopilot disabled") + conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.ConditionSetter(), 
infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil + } } - if err = s.createCluster(ctx); err != nil { + if err = s.createCluster(ctx, &log); err != nil { + log.Error(err, "failed creating cluster") conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } - log.Info("Cluster provisioning in progress") + log.Info("Cluster created provisioning in progress") conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } + log.V(2).Info("gke cluster found", "status", cluster.Status) + s.scope.GCPManagedControlPlane.Status.CurrentVersion = cluster.CurrentMasterVersion + switch cluster.Status { case containerpb.Cluster_PROVISIONING: log.Info("Cluster provisioning in progress") @@ -89,14 +105,14 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { log.Info("Cluster reconciling in progress") conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition) s.scope.GCPManagedControlPlane.Status.Ready = true - return ctrl.Result{}, nil + return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.Cluster_STOPPING: log.Info("Cluster stopping in progress") conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) s.scope.GCPManagedControlPlane.Status.Ready = false - return ctrl.Result{}, nil + return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.Cluster_ERROR, containerpb.Cluster_DEGRADED: var msg string if len(cluster.Conditions) > 0 { @@ -109,14 +125,15 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { case containerpb.Cluster_RUNNING: log.Info("Cluster running") default: - log.Error(errors.New("Unhandled cluster status"), fmt.Sprintf("Unhandled cluster status %s", cluster.Status), "name", s.scope.ClusterName()) - return ctrl.Result{}, nil + statusErr := NewErrUnexpectedClusterStatus(string(cluster.Status)) + log.Error(statusErr, 
fmt.Sprintf("Unhandled cluster status %s", cluster.Status), "name", s.scope.ClusterName()) + return ctrl.Result{}, statusErr } needUpdate, updateClusterRequest := s.checkDiffAndPrepareUpdate(cluster) if needUpdate { log.Info("Update required") - err = s.updateCluster(ctx, updateClusterRequest) + err = s.updateCluster(ctx, updateClusterRequest, &log) if err != nil { return ctrl.Result{}, err } @@ -128,22 +145,25 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1.ConditionSeverityInfo, "") // Reconcile kubeconfig - err = s.reconcileKubeconfig(ctx, cluster) + err = s.reconcileKubeconfig(ctx, cluster, &log) if err != nil { + log.Error(err, "Failed to reconcile CAPI kubeconfig") return ctrl.Result{}, err } - err = s.reconcileAdditionalKubeconfigs(ctx, cluster) + err = s.reconcileAdditionalKubeconfigs(ctx, cluster, &log) if err != nil { + log.Error(err, "Failed to reconcile additional kubeconfig") return ctrl.Result{}, err } s.scope.SetEndpoint(cluster.Endpoint) - log.Info("Cluster reconciled") conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition) conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition) conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneCreatedReason, clusterv1.ConditionSeverityInfo, "") s.scope.GCPManagedControlPlane.Status.Ready = true + log.Info("Cluster reconciled") + return ctrl.Result{}, nil } @@ -152,7 +172,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { log := log.FromContext(ctx).WithValues("service", "container.clusters") log.Info("Deleting cluster resources") - cluster, err := s.describeCluster(ctx) + cluster, err := s.describeCluster(ctx, &log) if err != nil { return ctrl.Result{}, err } @@ -178,7 +198,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { break } - if err = s.deleteCluster(ctx); err != nil { + if err = s.deleteCluster(ctx, &log); err != nil { conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } @@ -191,9 +211,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, nil } -func (s *Service) describeCluster(ctx context.Context) (*containerpb.Cluster, error) { - log := log.FromContext(ctx) - +func (s *Service) describeCluster(ctx context.Context, log *logr.Logger) (*containerpb.Cluster, error) { getClusterRequest := &containerpb.GetClusterRequest{ Name: s.scope.ClusterFullName(), } @@ -212,17 +230,22 @@ func (s *Service) describeCluster(ctx context.Context) (*containerpb.Cluster, er return cluster, nil } -func (s *Service) createCluster(ctx context.Context) error { - log := log.FromContext(ctx) - +func (s *Service) createCluster(ctx context.Context, log *logr.Logger) error { nodePools, machinePools, _ := s.scope.GetAllNodePools(ctx) + + log.V(2).Info("Running pre-flight checks on machine pools before cluster creation") + if err := shared.ManagedMachinePoolsPreflightCheck(nodePools, machinePools, s.scope.Region()); err != nil { + return fmt.Errorf("preflight checks on machine pools before cluster create: %w", err) + } + + isRegional := shared.IsRegional(s.scope.Region()) + cluster := &containerpb.Cluster{ Name: 
s.scope.ClusterName(), Network: *s.scope.GCPManagedCluster.Spec.Network.Name, Autopilot: &containerpb.Autopilot{ - Enabled: false, + Enabled: s.scope.GCPManagedControlPlane.Spec.EnableAutopilot, }, - NodePools: scope.ConvertToSdkNodePools(nodePools, machinePools), ReleaseChannel: &containerpb.ReleaseChannel{ Channel: convertToSdkReleaseChannel(s.scope.GCPManagedControlPlane.Spec.ReleaseChannel), }, @@ -230,10 +253,16 @@ func (s *Service) createCluster(ctx context.Context) error { if s.scope.GCPManagedControlPlane.Spec.ControlPlaneVersion != nil { cluster.InitialClusterVersion = *s.scope.GCPManagedControlPlane.Spec.ControlPlaneVersion } + if !s.scope.IsAutopilotCluster() { + cluster.NodePools = scope.ConvertToSdkNodePools(nodePools, machinePools, isRegional) + } + createClusterRequest := &containerpb.CreateClusterRequest{ Cluster: cluster, Parent: s.scope.ClusterLocation(), } + + log.V(2).Info("Creating GKE cluster") _, err := s.scope.ManagedControlPlaneClient().CreateCluster(ctx, createClusterRequest) if err != nil { log.Error(err, "Error creating GKE cluster", "name", s.scope.ClusterName()) @@ -243,9 +272,7 @@ func (s *Service) createCluster(ctx context.Context) error { return nil } -func (s *Service) updateCluster(ctx context.Context, updateClusterRequest *containerpb.UpdateClusterRequest) error { - log := log.FromContext(ctx) - +func (s *Service) updateCluster(ctx context.Context, updateClusterRequest *containerpb.UpdateClusterRequest, log *logr.Logger) error { _, err := s.scope.ManagedControlPlaneClient().UpdateCluster(ctx, updateClusterRequest) if err != nil { log.Error(err, "Error updating GKE cluster", "name", s.scope.ClusterName()) @@ -255,9 +282,7 @@ func (s *Service) updateCluster(ctx context.Context, updateClusterRequest *conta return nil } -func (s *Service) deleteCluster(ctx context.Context) error { - log := log.FromContext(ctx) - +func (s *Service) deleteCluster(ctx context.Context, log *logr.Logger) error { deleteClusterRequest := &containerpb.DeleteClusterRequest{ Name: s.scope.ClusterFullName(), } diff --git a/cloud/services/container/nodepools/reconcile.go b/cloud/services/container/nodepools/reconcile.go index 5b94b22cb..f315f339c 100644 --- a/cloud/services/container/nodepools/reconcile.go +++ b/cloud/services/container/nodepools/reconcile.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/util/resourceurl" "google.golang.org/api/iterator" @@ -28,9 +29,12 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "cloud.google.com/go/container/apiv1/containerpb" + "github.com/go-logr/logr" "github.com/googleapis/gax-go/v2/apierror" "github.com/pkg/errors" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -44,7 +48,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { log := log.FromContext(ctx) log.Info("Reconciling node pool resources") - nodePool, err := s.describeNodePool(ctx) + nodePool, err := s.describeNodePool(ctx, &log) if err != nil { s.scope.GCPManagedMachinePool.Status.Ready = false conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) @@ -53,7 +57,7 @@ 
func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { if nodePool == nil { log.Info("Node pool not found, creating", "cluster", s.scope.Cluster.Name) s.scope.GCPManagedMachinePool.Status.Ready = false - if err = s.createNodePool(ctx); err != nil { + if err = s.createNodePool(ctx, &log); err != nil { conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) @@ -65,6 +69,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } + log.V(2).Info("Node pool found", "cluster", s.scope.Cluster.Name, "nodepool", nodePool.Name) instances, err := s.getInstances(ctx, nodePool) if err != nil { @@ -74,7 +79,15 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { } providerIDList := []string{} for _, instance := range instances { - providerIDList = append(providerIDList, *instance.Instance) + log.V(4).Info("parsing gce instance url", "url", *instance.Instance) + providerID, err := providerid.NewFromResourceURL(*instance.Instance) + if err != nil { + log.Error(err, "parsing instance url", "url", *instance.Instance) + s.scope.GCPManagedMachinePool.Status.Ready = false + conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1.ConditionSeverityError, "") + return ctrl.Result{}, err + } + providerIDList = append(providerIDList, providerID.String()) } s.scope.GCPManagedMachinePool.Spec.ProviderIDList = providerIDList @@ -170,7 +183,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { log := log.FromContext(ctx) log.Info("Deleting node pool resources") - nodePool, err := s.describeNodePool(ctx) + nodePool, err := s.describeNodePool(ctx, &log) if err != nil { return ctrl.Result{}, err } @@ -209,9 +222,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, nil } -func (s *Service) describeNodePool(ctx context.Context) (*containerpb.NodePool, error) { - log := log.FromContext(ctx) - +func (s *Service) describeNodePool(ctx context.Context, log *logr.Logger) (*containerpb.NodePool, error) { getNodePoolRequest := &containerpb.GetNodePoolRequest{ Name: s.scope.NodePoolFullName(), } @@ -259,9 +270,16 @@ func (s *Service) getInstances(ctx context.Context, nodePool *containerpb.NodePo return instances, nil } -func (s *Service) createNodePool(ctx context.Context) error { +func (s *Service) createNodePool(ctx context.Context, log *logr.Logger) error { + log.V(2).Info("Running pre-flight checks on machine pool before creation") + if err := shared.ManagedMachinePoolPreflightCheck(s.scope.GCPManagedMachinePool, s.scope.MachinePool, s.scope.Region()); err != nil { + return fmt.Errorf("preflight checks on machine pool before creating: %w", err) + } + + isRegional := shared.IsRegional(s.scope.Region()) + createNodePoolRequest := &containerpb.CreateNodePoolRequest{ - NodePool: 
scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool), + NodePool: scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool, isRegional), Parent: s.scope.NodePoolLocation(), } _, err := s.scope.ManagedMachinePoolClient().CreateNodePool(ctx, createNodePoolRequest) @@ -342,7 +360,9 @@ func (s *Service) checkDiffAndPrepareUpdateVersionOrImage(existingNodePool *cont func (s *Service) checkDiffAndPrepareUpdateAutoscaling(existingNodePool *containerpb.NodePool) (bool, *containerpb.SetNodePoolAutoscalingRequest) { needUpdate := false - desiredAutoscaling := scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool).Autoscaling + isRegional := shared.IsRegional(s.scope.Region()) + + desiredAutoscaling := scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool, isRegional).Autoscaling var existingAutoscaling *containerpb.NodePoolAutoscaling if existingNodePool.Autoscaling != nil && existingNodePool.Autoscaling.Enabled { existingAutoscaling = &containerpb.NodePoolAutoscaling{ @@ -367,9 +387,15 @@ func (s *Service) checkDiffAndPrepareUpdateSize(existingNodePool *containerpb.No setNodePoolSizeRequest := containerpb.SetNodePoolSizeRequest{ Name: s.scope.NodePoolFullName(), } - if s.scope.GCPManagedMachinePool.Spec.InitialNodeCount != existingNodePool.InitialNodeCount { + + replicas := *s.scope.MachinePool.Spec.Replicas + if shared.IsRegional(s.scope.Region()) { + replicas /= cloud.DefaultNumRegionsPerZone + } + + if replicas != existingNodePool.InitialNodeCount { needUpdate = true - setNodePoolSizeRequest.NodeCount = s.scope.GCPManagedMachinePool.Spec.InitialNodeCount + setNodePoolSizeRequest.NodeCount = replicas } return needUpdate, &setNodePoolSizeRequest } diff --git a/cloud/services/shared/doc.go b/cloud/services/shared/doc.go new file mode 100644 index 000000000..9c3e9ba6b --- /dev/null +++ b/cloud/services/shared/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package shared implements functionality that is shared across different services. +package shared diff --git a/cloud/services/shared/machinepool.go b/cloud/services/shared/machinepool.go new file mode 100644 index 000000000..e1f7c44b7 --- /dev/null +++ b/cloud/services/shared/machinepool.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package shared + +import ( + "errors" + "fmt" + "strings" + + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + + "sigs.k8s.io/cluster-api-provider-gcp/cloud" + infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" +) + +// ManagedMachinePoolPreflightCheck will perform checks against the machine pool before its created. +func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1exp.MachinePool, location string) error { + if machinePool.Spec.Template.Spec.InfrastructureRef.Name != managedPool.Name { + return fmt.Errorf("expect machinepool infraref (%s) to match managed machine pool name (%s)", machinePool.Spec.Template.Spec.InfrastructureRef.Name, managedPool.Name) + } + + if IsRegional(location) { + if *machinePool.Spec.Replicas%cloud.DefaultNumRegionsPerZone != 0 { + return fmt.Errorf("a machine pool (%s) in a regional cluster must have replicas with a multiple of %d", machinePool.Name, cloud.DefaultNumRegionsPerZone) + } + } + + return nil +} + +// ManagedMachinePoolsPreflightCheck will perform checks against a slice of machine pool before they are created. +func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, location string) error { + if len(machinePools) != len(managedPools) { + return errors.New("each machinepool must have a matching gcpmanagedmachinepool") + } + + for i := range machinePools { + machinepool := machinePools[i] + managedPool := managedPools[i] + + if err := ManagedMachinePoolPreflightCheck(&managedPool, &machinepool, location); err != nil { + return err + } + } + + return nil +} + +// IsRegional will check if a given location is a region (if not its a zone). +func IsRegional(location string) bool { + return strings.Count(location, "-") == 1 +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedcontrolplanes.yaml index 8866aebd2..120440663 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedcontrolplanes.yaml @@ -19,7 +19,25 @@ spec: singular: gcpmanagedcontrolplane scope: Namespaced versions: - - name: v1beta1 + - additionalPrinterColumns: + - description: Cluster to which this GCPManagedControlPlane belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Control plane is ready + jsonPath: .status.ready + name: Ready + type: string + - description: The current Kubernetes version + jsonPath: .status.currentVersion + name: CurrentVersion + type: string + - description: API Endpoint + jsonPath: .spec.endpoint + name: Endpoint + priority: 1 + type: string + name: v1beta1 schema: openAPIV3Schema: description: GCPManagedControlPlane is the Schema for the gcpmanagedcontrolplanes @@ -79,7 +97,7 @@ spec: type: string releaseChannel: description: ReleaseChannel represents the release channel of the - GKE cluster. If not specified, it defaults to `regular`. + GKE cluster. enum: - rapid - regular @@ -95,7 +113,7 @@ spec: GCPManagedControlPlane. 
properties: conditions: - description: Conditions specifies the cpnditions for the managed control + description: Conditions specifies the conditions for the managed control plane items: description: Condition defines an observation of a Cluster API resource @@ -140,7 +158,14 @@ spec: - type type: object type: array + currentVersion: + description: CurrentVersion shows the current version of the GKE control + plane. + type: string ready: + default: false + description: Ready denotes that the GCPManagedControlPlane API Server + is ready to receive requests. type: boolean required: - ready diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedmachinepools.yaml index 5581257cb..fc9c95835 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_gcpmanagedmachinepools.yaml @@ -51,12 +51,6 @@ spec: GCP resources managed by the GCP provider, in addition to the ones added by default. type: object - initialNodeCount: - description: InitialNodeCount represents the initial number of nodes - for the pool. In regional or multi-zonal clusters, this is the number - of nodes per zone. - format: int32 - type: integer kubernetesLabels: additionalProperties: type: string @@ -111,8 +105,6 @@ spec: format: int32 type: integer type: object - required: - - initialNodeCount type: object status: description: GCPManagedMachinePoolStatus defines the observed state of diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 28f680927..c51195cc9 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,6 +23,7 @@ spec: - --leader-elect - --feature-gates=GKE=${EXP_CAPG_GKE:=false} - "--metrics-bind-addr=localhost:8080" + - "--v=${CAPG_LOGLEVEL:=0}" image: controller:latest imagePullPolicy: IfNotPresent name: manager diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fd27661a2..623453dbe 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -21,8 +21,12 @@ rules: resources: - secrets verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - cluster.x-k8s.io diff --git a/docs/book/src/topics/gke/enabling.md b/docs/book/src/topics/gke/enabling.md index f6598b373..98cb7139f 100644 --- a/docs/book/src/topics/gke/enabling.md +++ b/docs/book/src/topics/gke/enabling.md @@ -6,3 +6,5 @@ Enabling GKE support is done via the **GKE** feature flag by setting it to true. export EXP_CAPG_GKE=true clusterctl init --infrastructure gcp ``` + +> IMPORTANT: To use GKE the service account used for CAPG will need the `iam.serviceAccountTokenCreator` role assigned. diff --git a/docs/book/src/topics/prerequisites.md b/docs/book/src/topics/prerequisites.md index d4a529410..1c090655e 100644 --- a/docs/book/src/topics/prerequisites.md +++ b/docs/book/src/topics/prerequisites.md @@ -59,7 +59,11 @@ gcloud compute routers nats create "${CLUSTER_NAME}-mynat" --project="${GCP_PROJ To create and manage clusters, this infrastructure provider uses a service account to authenticate with GCP's APIs. -From your cloud console, follow [these instructions](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating) to create a new service account with `Editor` permissions. Afterwards, generate a JSON Key and store it somewhere safe. 
+From your cloud console, follow [these instructions](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating) to create a new service account with `Editor` permissions. + +If you plan to use GKE, the service account will also need the `iam.serviceAccountTokenCreator` role. + +Afterwards, generate a JSON Key and store it somewhere safe. ### Building images
diff --git a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go
index d19d046da..166c4e651 100644
--- a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go
+++ b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go
@@ -41,7 +41,7 @@ type GCPManagedControlPlaneSpec struct { Location string `json:"location"` // EnableAutopilot indicates whether to enable autopilot for this GKE cluster. EnableAutopilot bool `json:"enableAutopilot"` - // ReleaseChannel represents the release channel of the GKE cluster. If not specified, it defaults to `regular`. + // ReleaseChannel represents the release channel of the GKE cluster. // +optional ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` // ControlPlaneVersion represents the control plane version of the GKE cluster.
@@ -56,16 +56,27 @@ type GCPManagedControlPlaneSpec struct { // GCPManagedControlPlaneStatus defines the observed state of GCPManagedControlPlane. type GCPManagedControlPlaneStatus struct { + // Ready denotes that the GCPManagedControlPlane API Server is ready to + // receive requests. + // +kubebuilder:default=false Ready bool `json:"ready"` - // Conditions specifies the cpnditions for the managed control plane + // Conditions specifies the conditions for the managed control plane Conditions clusterv1.Conditions `json:"conditions,omitempty"` + + // CurrentVersion shows the current version of the GKE control plane. + // +optional + CurrentVersion string `json:"currentVersion,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:resource:path=gcpmanagedcontrolplanes,scope=Namespaced,categories=cluster-api,shortName=gcpmcp // +kubebuilder:storageversion // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this GCPManagedControlPlane belongs" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane is ready" +// +kubebuilder:printcolumn:name="CurrentVersion",type="string",JSONPath=".status.currentVersion",description="The current Kubernetes version" +// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.endpoint",description="API Endpoint",priority=1 // GCPManagedControlPlane is the Schema for the gcpmanagedcontrolplanes API.
type GCPManagedControlPlane struct { diff --git a/exp/api/v1beta1/gcpmanagedcontrolplane_webhook.go b/exp/api/v1beta1/gcpmanagedcontrolplane_webhook.go index 04a57cf17..a72614fd4 100644 --- a/exp/api/v1beta1/gcpmanagedcontrolplane_webhook.go +++ b/exp/api/v1beta1/gcpmanagedcontrolplane_webhook.go @@ -84,6 +84,10 @@ func (r *GCPManagedControlPlane) ValidateCreate() error { ) } + if r.Spec.EnableAutopilot && r.Spec.ReleaseChannel == nil { + allErrs = append(allErrs, field.Required(field.NewPath("spec", "ReleaseChannel"), "Release channel is required for an autopilot enabled cluster")) + } + if len(allErrs) == 0 { return nil } diff --git a/exp/api/v1beta1/gcpmanagedmachinepool_types.go b/exp/api/v1beta1/gcpmanagedmachinepool_types.go index 81e5037a3..b8a2aea0a 100644 --- a/exp/api/v1beta1/gcpmanagedmachinepool_types.go +++ b/exp/api/v1beta1/gcpmanagedmachinepool_types.go @@ -34,9 +34,6 @@ type GCPManagedMachinePoolSpec struct { // then a default name will be created based on the namespace and name of the managed machine pool. // +optional NodePoolName string `json:"nodePoolName,omitempty"` - // InitialNodeCount represents the initial number of nodes for the pool. - // In regional or multi-zonal clusters, this is the number of nodes per zone. - InitialNodeCount int32 `json:"initialNodeCount"` // Scaling specifies scaling for the node pool // +optional Scaling *NodePoolAutoScaling `json:"scaling,omitempty"` diff --git a/exp/api/v1beta1/gcpmanagedmachinepool_webhook.go b/exp/api/v1beta1/gcpmanagedmachinepool_webhook.go index ca442f342..22eff09aa 100644 --- a/exp/api/v1beta1/gcpmanagedmachinepool_webhook.go +++ b/exp/api/v1beta1/gcpmanagedmachinepool_webhook.go @@ -54,20 +54,6 @@ func (r *GCPManagedMachinePool) Default() { var _ webhook.Validator = &GCPManagedMachinePool{} -func (r *GCPManagedMachinePool) validateNodeCount() field.ErrorList { - var allErrs field.ErrorList - if r.Spec.InitialNodeCount < 0 { - allErrs = append(allErrs, - field.Invalid(field.NewPath("spec", "InitialNodeCount"), - r.Spec.InitialNodeCount, "must be greater or equal to zero"), - ) - } - if len(allErrs) == 0 { - return nil - } - return allErrs -} - func (r *GCPManagedMachinePool) validateScaling() field.ErrorList { var allErrs field.ErrorList if r.Spec.Scaling != nil { @@ -79,16 +65,10 @@ func (r *GCPManagedMachinePool) validateScaling() field.ErrorList { if *min < 0 { allErrs = append(allErrs, field.Invalid(minField, *min, "must be greater or equal zero")) } - if *min > r.Spec.InitialNodeCount { - allErrs = append(allErrs, field.Invalid(minField, *min, fmt.Sprintf("must be less or equal to %d", r.Spec.InitialNodeCount))) - } if max != nil && *max < *min { allErrs = append(allErrs, field.Invalid(maxField, *max, fmt.Sprintf("must be greater than field %s", minField.String()))) } } - if max != nil && *max < r.Spec.InitialNodeCount { - allErrs = append(allErrs, field.Invalid(maxField, *max, fmt.Sprintf("must be greater or equal to %d", r.Spec.InitialNodeCount))) - } } if len(allErrs) == 0 { return nil @@ -108,10 +88,6 @@ func (r *GCPManagedMachinePool) ValidateCreate() error { ) } - if errs := r.validateNodeCount(); errs != nil || len(errs) == 0 { - allErrs = append(allErrs, errs...) - } - if errs := r.validateScaling(); errs != nil || len(errs) == 0 { allErrs = append(allErrs, errs...) } @@ -136,10 +112,6 @@ func (r *GCPManagedMachinePool) ValidateUpdate(oldRaw runtime.Object) error { ) } - if errs := r.validateNodeCount(); errs != nil || len(errs) == 0 { - allErrs = append(allErrs, errs...) 
- } - if errs := r.validateScaling(); errs != nil || len(errs) == 0 { allErrs = append(allErrs, errs...) } diff --git a/exp/controllers/gcpmanagedcluster_controller.go b/exp/controllers/gcpmanagedcluster_controller.go index 6b8167b13..3dbc396ea 100644 --- a/exp/controllers/gcpmanagedcluster_controller.go +++ b/exp/controllers/gcpmanagedcluster_controller.go @@ -167,7 +167,7 @@ func (r *GCPManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr } func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScope *scope.ManagedClusterScope) (ctrl.Result, error) { - log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedcluster") log.Info("Reconciling GCPManagedCluster") controllerutil.AddFinalizer(clusterScope.GCPManagedCluster, infrav1exp.ClusterFinalizer) @@ -193,14 +193,15 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop } clusterScope.SetFailureDomains(failureDomains) - reconcilers := []cloud.Reconciler{ - networks.New(clusterScope), - subnets.New(clusterScope), + reconcilers := map[string]cloud.Reconciler{ + "networks": networks.New(clusterScope), + "subnets": subnets.New(clusterScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { + log.V(4).Info("Calling reconciler", "reconciler", name) if err := r.Reconcile(ctx); err != nil { - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(clusterScope.GCPManagedCluster, "GCPManagedClusterReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } @@ -223,7 +224,7 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop } func (r *GCPManagedClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ManagedClusterScope) (ctrl.Result, error) { - log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedcluster", "action", "delete") log.Info("Reconciling Delete GCPManagedCluster") if clusterScope.GCPManagedControlPlane != nil { @@ -231,14 +232,15 @@ func (r *GCPManagedClusterReconciler) reconcileDelete(ctx context.Context, clust return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } - reconcilers := []cloud.Reconciler{ - subnets.New(clusterScope), - networks.New(clusterScope), + reconcilers := map[string]cloud.Reconciler{ + "subnets": subnets.New(clusterScope), + "networks": networks.New(clusterScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { + log.V(4).Info("Calling reconciler delete", "reconciler", name) if err := r.Delete(ctx); err != nil { - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(clusterScope.GCPManagedCluster, "GCPManagedClusterReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } @@ -266,11 +268,6 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper(ctx context.Cont return nil } - if gcpManagedControlPlane.Spec.Endpoint.IsZero() { - log.V(2).Info("GCPManagedControlPlane has no endpoint, skipping mapping") - return nil - } - cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "failed to get owning cluster") diff --git a/exp/controllers/gcpmanagedcontrolplane_controller.go b/exp/controllers/gcpmanagedcontrolplane_controller.go index a8cd37751..0e88d7e78 100644 --- a/exp/controllers/gcpmanagedcontrolplane_controller.go +++ b/exp/controllers/gcpmanagedcontrolplane_controller.go @@ 
-57,14 +57,16 @@ type GCPManagedControlPlaneReconciler struct { //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmanagedcontrolplanes/finalizers,verbs=update //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmanagedclusters,verbs=get;list;watch //+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch // SetupWithManager sets up the controller with the Manager. func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { log := log.FromContext(ctx).WithValues("controller", "GCPManagedControlPlane") + gcpManagedControlPlane := &infrav1exp.GCPManagedControlPlane{} c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options). - For(&infrav1exp.GCPManagedControlPlane{}). + For(gcpManagedControlPlane). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). Build(r) if err != nil { @@ -73,7 +75,7 @@ func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( &source.Kind{Type: &clusterv1.Cluster{}}, - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1exp.GroupVersion.WithKind("GCPManagedCluster"), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, gcpManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), predicates.ClusterUnpausedAndInfrastructureReady(log), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) @@ -151,7 +153,7 @@ func (r *GCPManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct } func (r *GCPManagedControlPlaneReconciler) reconcile(ctx context.Context, managedControlPlaneScope *scope.ManagedControlPlaneScope) (ctrl.Result, error) { - log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedcontrolplane") log.Info("Reconciling GCPManagedControlPlane") controllerutil.AddFinalizer(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.ManagedControlPlaneFinalizer) @@ -164,18 +166,23 @@ func (r *GCPManagedControlPlaneReconciler) reconcile(ctx context.Context, manage return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } - reconcilers := []cloud.ReconcilerWithResult{ - clusters.New(managedControlPlaneScope), + reconcilers := map[string]cloud.ReconcilerWithResult{ + "container_clusters": clusters.New(managedControlPlaneScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { res, err := r.Reconcile(ctx) if err != nil { - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(managedControlPlaneScope.GCPManagedControlPlane, "GCPManagedControlPlaneReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } + if res.RequeueAfter > 0 { + log.V(4).Info("Reconciler requested requeueAfter", "reconciler", name, "after", res.RequeueAfter) + return res, nil + } if res.Requeue { + log.V(4).Info("Reconciler requested requeue", "reconciler", name) return res, nil } } @@ -184,21 +191,26 @@ func (r *GCPManagedControlPlaneReconciler) reconcile(ctx context.Context, manage } func (r *GCPManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, managedControlPlaneScope *scope.ManagedControlPlaneScope) (ctrl.Result, error) { - 
log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedcontrolplane", "action", "delete") log.Info("Deleting GCPManagedControlPlane") - reconcilers := []cloud.ReconcilerWithResult{ - clusters.New(managedControlPlaneScope), + reconcilers := map[string]cloud.ReconcilerWithResult{ + "container_clusters": clusters.New(managedControlPlaneScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { res, err := r.Delete(ctx) if err != nil { - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(managedControlPlaneScope.GCPManagedControlPlane, "GCPManagedControlPlaneReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } + if res.RequeueAfter > 0 { + log.V(4).Info("Reconciler requested requeueAfter", "reconciler", name, "after", res.RequeueAfter) + return res, nil + } if res.Requeue { + log.V(4).Info("Reconciler requested requeue", "reconciler", name) return res, nil } } diff --git a/exp/controllers/gcpmanagedmachinepool_controller.go b/exp/controllers/gcpmanagedmachinepool_controller.go index 74788f398..537a79658 100644 --- a/exp/controllers/gcpmanagedmachinepool_controller.go +++ b/exp/controllers/gcpmanagedmachinepool_controller.go @@ -317,7 +317,7 @@ func (r *GCPManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } func (r *GCPManagedMachinePoolReconciler) reconcile(ctx context.Context, managedMachinePoolScope *scope.ManagedMachinePoolScope) (ctrl.Result, error) { - log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedmachinepool") log.Info("Reconciling GCPManagedMachinePool") controllerutil.AddFinalizer(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.ManagedMachinePoolFinalizer) @@ -325,25 +325,31 @@ func (r *GCPManagedMachinePoolReconciler) reconcile(ctx context.Context, managed return ctrl.Result{}, err } - reconcilers := []cloud.ReconcilerWithResult{ - nodepools.New(managedMachinePoolScope), + reconcilers := map[string]cloud.ReconcilerWithResult{ + "nodepools": nodepools.New(managedMachinePoolScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { + log.V(4).Info("Calling reconciler", "reconciler", name) res, err := r.Reconcile(ctx) if err != nil { var e *apierror.APIError if ok := errors.As(err, &e); ok { if e.GRPCStatus().Code() == codes.FailedPrecondition { - log.Info("Cannot perform update when there's other operation, retry later") + log.Info("Cannot perform update when there's other operation, retry later", "reconciler", name) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } } - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(managedMachinePoolScope.GCPManagedMachinePool, "GCPManagedMachinePoolReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } + if res.RequeueAfter > 0 { + log.V(4).Info("Reconciler requested requeueAfter", "reconciler", name, "after", res.RequeueAfter) + return res, nil + } if res.Requeue { + log.V(4).Info("Reconciler requested requeue", "reconciler", name) return res, nil } } @@ -352,21 +358,27 @@ func (r *GCPManagedMachinePoolReconciler) reconcile(ctx context.Context, managed } func (r *GCPManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, managedMachinePoolScope *scope.ManagedMachinePoolScope) (ctrl.Result, error) { - log := log.FromContext(ctx) + log := log.FromContext(ctx).WithValues("controller", "gcpmanagedmachinepool", "action", "delete") 
log.Info("Deleting GCPManagedMachinePool") - reconcilers := []cloud.ReconcilerWithResult{ - nodepools.New(managedMachinePoolScope), + reconcilers := map[string]cloud.ReconcilerWithResult{ + "nodepools": nodepools.New(managedMachinePoolScope), } - for _, r := range reconcilers { + for name, r := range reconcilers { + log.V(4).Info("Calling reconciler delete", "reconciler", name) res, err := r.Delete(ctx) if err != nil { - log.Error(err, "Reconcile error") + log.Error(err, "Reconcile error", "reconciler", name) record.Warnf(managedMachinePoolScope.GCPManagedMachinePool, "GCPManagedMachinePoolReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } + if res.RequeueAfter > 0 { + log.V(4).Info("Reconciler requested requeueAfter", "reconciler", name, "after", res.RequeueAfter) + return res, nil + } if res.Requeue { + log.V(4).Info("Reconciler requested requeue", "reconciler", name) return res, nil } } diff --git a/templates/cluster-template-gke-autopilot.yaml b/templates/cluster-template-gke-autopilot.yaml new file mode 100644 index 000000000..21363b078 --- /dev/null +++ b/templates/cluster-template-gke-autopilot.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: GCPManagedCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: GCPManagedControlPlane + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + project: "${GCP_PROJECT}" + region: "${GCP_REGION}" + network: + name: "${GCP_NETWORK_NAME}" +--- +kind: GCPManagedControlPlane +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + project: "${GCP_PROJECT}" + location: "${GCP_REGION}" + enableAutopilot: true diff --git a/templates/cluster-template-gke.yaml b/templates/cluster-template-gke.yaml index 721ce9ba2..b4cfd98cf 100644 --- a/templates/cluster-template-gke.yaml +++ b/templates/cluster-template-gke.yaml @@ -55,8 +55,7 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: GCPManagedMachinePool metadata: name: "${CLUSTER_NAME}-mp-0" -spec: - initialNodeCount: 1 +spec: {} diff --git a/test/e2e/config/gcp-ci.yaml b/test/e2e/config/gcp-ci.yaml index 83403d9d9..9e147746f 100644 --- a/test/e2e/config/gcp-ci.yaml +++ b/test/e2e/config/gcp-ci.yaml @@ -69,6 +69,8 @@ providers: - sourcePath: "${PWD}/test/e2e/data/infrastructure-gcp/cluster-template-topology.yaml" - sourcePath: "${PWD}/test/e2e/data/infrastructure-gcp/clusterclass-quick-start.yaml" - sourcePath: "${PWD}/test/e2e/data/infrastructure-gcp/cluster-template-ci-with-creds.yaml" + - sourcePath: "${PWD}/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke.yaml" + - sourcePath: "${PWD}/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke-autopilot.yaml" variables: KUBERNETES_VERSION: "${KUBERNETES_VERSION:-v1.25.5}" @@ -92,12 +94,18 @@ variables: KUBETEST_CONFIGURATION: "${PWD}/test/e2e/data/kubetest/conformance.yaml" IMAGE_ID: "${IMAGE_ID}" IP_FAMILY: "IPv4" + EXP_CAPG_GKE: "true" + EXP_MACHINE_POOL: "true" + GKE_MACHINE_POOL_MIN: "1" + GKE_MACHINE_POOL_MAX: "2" + CAPG_LOGLEVEL: "4" intervals: default/wait-controllers: ["3m", "10s"] default/wait-cluster: ["20m", "10s"] default/wait-control-plane: ["30m", "10s"] 
default/wait-worker-nodes: ["30m", "10s"] + default/wait-worker-machine-pools: ["30m", "10s"] default/wait-delete-cluster: ["20m", "10s"] default/wait-machine-upgrade: ["50m", "10s"] default/wait-machine-remediation: ["30m", "10s"] diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke-autopilot.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke-autopilot.yaml new file mode 100644 index 000000000..0ef2401f9 --- /dev/null +++ b/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke-autopilot.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: GCPManagedCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: GCPManagedControlPlane + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + project: "${GCP_PROJECT}" + region: "${GCP_REGION}" + network: + name: "${GCP_NETWORK_NAME}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedControlPlane +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + project: "${GCP_PROJECT}" + location: "${GCP_REGION}" + enableAutopilot: true + releaseChannel: "regular" diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke.yaml new file mode 100644 index 000000000..228acdb52 --- /dev/null +++ b/test/e2e/data/infrastructure-gcp/cluster-template-ci-gke.yaml @@ -0,0 +1,63 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: GCPManagedCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: GCPManagedControlPlane + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + project: "${GCP_PROJECT}" + region: "${GCP_REGION}" + network: + name: "${GCP_NETWORK_NAME}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedControlPlane +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + project: "${GCP_PROJECT}" + location: "${GCP_REGION}" + releaseChannel: "regular" +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + dataSecretName: "" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: GCPManagedMachinePool + name: ${CLUSTER_NAME}-mp-0 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: GCPManagedMachinePool +metadata: + name: ${CLUSTER_NAME}-mp-0 +spec: + scaling: + minCount: ${GKE_MACHINE_POOL_MIN} + maxCount: ${GKE_MACHINE_POOL_MAX} + diff --git a/test/e2e/e2e_gke_test.go b/test/e2e/e2e_gke_test.go new file mode 100644 index 000000000..b158345e8 --- /dev/null +++ b/test/e2e/e2e_gke_test.go @@ -0,0 +1,162 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +const ( + defaultNumZonesPerRegion = 3 +) + +var _ = Describe("GKE workload cluster creation", func() { + var ( + ctx = context.TODO() + specName = "create-gke-workload-cluster" + namespace *corev1.Namespace + cancelWatches context.CancelFunc + result *ApplyManagedClusterTemplateAndWaitResult + clusterName string + clusterctlLogFolder string + ) + + BeforeEach(func() { + Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) + Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName) + + Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion)) + + clusterName = fmt.Sprintf("capg-e2e-%s", util.RandomString(6)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) + + result = new(ApplyManagedClusterTemplateAndWaitResult) + + // We need to override clusterctl apply log folder to avoid getting our credentials exposed. 
+ clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) + }) + + AfterEach(func() { + cleanInput := cleanupInput{ + SpecName: specName, + Cluster: result.Cluster, + ClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + CancelWatches: cancelWatches, + IntervalsGetter: e2eConfig.GetIntervals, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactFolder, + } + + dumpSpecResourcesAndCleanup(ctx, cleanInput) + }) + + Context("Creating a GKE cluster without autopilot", func() { + It("Should create a cluster with 1 machine pool and scale", func() { + By("Initializes with 1 machine pool") + + minPoolSize, ok := e2eConfig.Variables["GKE_MACHINE_POOL_MIN"] + Expect(ok).To(BeTrue(), "must have min pool size set via the GKE_MACHINE_POOL_MIN variable") + maxPoolSize, ok := e2eConfig.Variables["GKE_MACHINE_POOL_MAX"] + Expect(ok).To(BeTrue(), "must have max pool size set via the GKE_MACHINE_POOL_MAX variable") + + ApplyManagedClusterTemplateAndWait(ctx, ApplyManagedClusterTemplateAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: clusterctlLogFolder, + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "ci-gke", + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(3), + ClusterctlVariables: map[string]string{ + "GKE_MACHINE_POOL_MIN": minPoolSize, + "GKE_MACHINE_POOL_MAX": maxPoolSize, + }, + }, + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }, result) + + By("Scaling the machine pool up") + framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Replicas: 6, + MachinePools: result.MachinePools, + WaitForMachinePoolToScale: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }) + + By("Scaling the machine pool down") + framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Replicas: 3, + MachinePools: result.MachinePools, + WaitForMachinePoolToScale: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }) + }) + }) + + Context("Creating a GKE cluster with autopilot", func() { + It("Should create a cluster with 1 machine pool and scale", func() { + By("Initializes with 1 machine pool") + + ApplyManagedClusterTemplateAndWait(ctx, ApplyManagedClusterTemplateAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: clusterctlLogFolder, + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "ci-gke-autopilot", + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(0), + }, + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), + 
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }, result) + }) + }) +}) diff --git a/test/e2e/gke.go b/test/e2e/gke.go new file mode 100644 index 000000000..1eb3128ab --- /dev/null +++ b/test/e2e/gke.go @@ -0,0 +1,208 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + + infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" +) + +const ( + retryableOperationInterval = 3 * time.Second + retryableOperationTimeout = 3 * time.Minute +) + +// ApplyManagedClusterTemplateAndWaitInput is the input type for ApplyManagedClusterTemplateAndWait. +type ApplyManagedClusterTemplateAndWaitInput struct { + ClusterProxy framework.ClusterProxy + ConfigCluster clusterctl.ConfigClusterInput + WaitForClusterIntervals []interface{} + WaitForControlPlaneIntervals []interface{} + WaitForMachinePools []interface{} + Args []string // extra args to be used during `kubectl apply` + PreWaitForCluster func() + PostMachinesProvisioned func() + WaitForControlPlaneInitialized Waiter +} + +// Waiter is a function that runs and waits for a long-running operation to finish and updates the result. +type Waiter func(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) + +// ApplyManagedClusterTemplateAndWaitResult is the output type for ApplyClusterTemplateAndWait. +type ApplyManagedClusterTemplateAndWaitResult struct { + ClusterClass *clusterv1.ClusterClass + Cluster *clusterv1.Cluster + ControlPlane *infrav1exp.GCPManagedControlPlane + MachinePools []*expv1.MachinePool +} + +// ApplyManagedClusterTemplateAndWait gets a managed cluster template using clusterctl, and waits for the cluster to be ready. +// Important! this method assumes the cluster uses a GCPManagedControlPlane and MachinePools. +func ApplyManagedClusterTemplateAndWait(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) { + setDefaults(&input) + Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyManagedClusterTemplateAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyManagedClusterTemplateAndWait") + Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyManagedClusterTemplateAndWait") + Expect(input.ConfigCluster.Flavor).ToNot(BeEmpty(), "Invalid argument. 
input.ConfigCluster.Flavor can't be empty")
+ Expect(input.ConfigCluster.ControlPlaneMachineCount).ToNot(BeNil())
+ Expect(input.ConfigCluster.WorkerMachineCount).ToNot(BeNil())
+
+ Byf("Creating the GKE workload cluster with name %q using the %q template (Kubernetes %s)",
+ input.ConfigCluster.ClusterName, input.ConfigCluster.Flavor, input.ConfigCluster.KubernetesVersion)
+
+ By("Getting the cluster template yaml")
+ workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{
+ // pass reference to the management cluster hosting this test
+ KubeconfigPath: input.ConfigCluster.KubeconfigPath,
+ // pass the clusterctl config file that points to the local provider repository created for this test,
+ ClusterctlConfigPath: input.ConfigCluster.ClusterctlConfigPath,
+ // select template
+ Flavor: input.ConfigCluster.Flavor,
+ // define template variables
+ Namespace: input.ConfigCluster.Namespace,
+ ClusterName: input.ConfigCluster.ClusterName,
+ KubernetesVersion: input.ConfigCluster.KubernetesVersion,
+ ControlPlaneMachineCount: input.ConfigCluster.ControlPlaneMachineCount,
+ WorkerMachineCount: input.ConfigCluster.WorkerMachineCount,
+ InfrastructureProvider: input.ConfigCluster.InfrastructureProvider,
+ // setup clusterctl logs folder
+ LogFolder: input.ConfigCluster.LogFolder,
+ ClusterctlVariables: input.ConfigCluster.ClusterctlVariables,
+ })
+ Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template")
+
+ By("Applying the cluster template yaml to the cluster")
+ Eventually(func() error {
+ return input.ClusterProxy.Apply(ctx, workloadClusterTemplate, input.Args...)
+ }, 10*time.Second).Should(Succeed(), "Failed to apply the cluster template")
+
+ // Once we applied the cluster template we can run PreWaitForCluster.
+ // Note: This can e.g. be used to verify the BeforeClusterCreate lifecycle hook is executed
+ // and blocking correctly.
+ if input.PreWaitForCluster != nil {
+ By("Calling PreWaitForCluster")
+ input.PreWaitForCluster()
+ }
+
+ By("Waiting for the cluster infrastructure to be provisioned")
+ result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
+ Getter: input.ClusterProxy.GetClient(),
+ Namespace: input.ConfigCluster.Namespace,
+ Name: input.ConfigCluster.ClusterName,
+ }, input.WaitForClusterIntervals...)
+
+ By("Waiting for managed control plane to be initialized")
+ input.WaitForControlPlaneInitialized(ctx, input, result)
+
+ By("Waiting for the machine pools to be provisioned")
+ result.MachinePools = framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{
+ Getter: input.ClusterProxy.GetClient(),
+ Lister: input.ClusterProxy.GetClient(),
+ Cluster: result.Cluster,
+ }, input.WaitForMachinePools...)
+
+ if input.PostMachinesProvisioned != nil {
+ By("Calling PostMachinesProvisioned")
+ input.PostMachinesProvisioned()
+ }
+}
+
+type ManagedControlPlaneResult struct {
+ clusterctl.ApplyClusterTemplateAndWaitResult
+
+ ManagedControlPlane *infrav1exp.GCPManagedControlPlane
+}
+
+// DiscoveryAndWaitForManagedControlPlaneInitializedInput is the input type for DiscoveryAndWaitForManagedControlPlaneInitialized.
+type DiscoveryAndWaitForManagedControlPlaneInitializedInput struct {
+ Lister framework.Lister
+ Cluster *clusterv1.Cluster
+}
+
+// DiscoveryAndWaitForManagedControlPlaneInitialized discovers the GCPManagedControlPlane object attached to a cluster and waits for it to be initialized. 
+func DiscoveryAndWaitForManagedControlPlaneInitialized(ctx context.Context, input DiscoveryAndWaitForManagedControlPlaneInitializedInput, intervals ...interface{}) *infrav1exp.GCPManagedControlPlane {
+ Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForManagedControlPlaneInitialized")
+ Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForManagedControlPlaneInitialized")
+ Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForManagedControlPlaneInitialized")
+
+ By("Getting GCPManagedControlPlane control plane")
+
+ var controlPlane *infrav1exp.GCPManagedControlPlane
+ Eventually(func(g Gomega) {
+ controlPlane = GetManagedControlPlaneByCluster(ctx, GetManagedControlPlaneByClusterInput{
+ Lister: input.Lister,
+ ClusterName: input.Cluster.Name,
+ Namespace: input.Cluster.Namespace,
+ })
+ g.Expect(controlPlane).ToNot(BeNil())
+ }, "10s", "1s").Should(Succeed(), "Couldn't get the control plane for the cluster %s", klog.KObj(input.Cluster))
+
+ return controlPlane
+}
+
+// GetManagedControlPlaneByClusterInput is the input for GetManagedControlPlaneByCluster.
+type GetManagedControlPlaneByClusterInput struct {
+ Lister framework.Lister
+ ClusterName string
+ Namespace string
+}
+
+// GetManagedControlPlaneByCluster returns the GCPManagedControlPlane object for a cluster, if any.
+func GetManagedControlPlaneByCluster(ctx context.Context, input GetManagedControlPlaneByClusterInput) *infrav1exp.GCPManagedControlPlane {
+ opts := []client.ListOption{
+ client.InNamespace(input.Namespace),
+ client.MatchingLabels{
+ clusterv1.ClusterLabelName: input.ClusterName,
+ },
+ }
+
+ controlPlaneList := &infrav1exp.GCPManagedControlPlaneList{}
+ Eventually(func() error {
+ return input.Lister.List(ctx, controlPlaneList, opts...)
+ }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list GCPManagedControlPlane object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
+ Expect(len(controlPlaneList.Items)).ToNot(BeNumerically(">", 1), "Cluster %s should not have more than 1 GCPManagedControlPlane object", klog.KRef(input.Namespace, input.ClusterName))
+ if len(controlPlaneList.Items) == 1 {
+ return &controlPlaneList.Items[0]
+ }
+ return nil
+}
+
+func setDefaults(input *ApplyManagedClusterTemplateAndWaitInput) {
+ if input.WaitForControlPlaneInitialized == nil {
+ input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) {
+ result.ControlPlane = DiscoveryAndWaitForManagedControlPlaneInitialized(ctx, DiscoveryAndWaitForManagedControlPlaneInitializedInput{
+ Lister: input.ClusterProxy.GetClient(),
+ Cluster: result.Cluster,
+ }, input.WaitForControlPlaneIntervals...)
+ }
+ }
+}
diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go
index f11e37fd6..36d4aa688 100644
--- a/test/e2e/suite_test.go
+++ b/test/e2e/suite_test.go
@@ -32,6 +32,7 @@ import (
 . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" + infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" @@ -186,6 +187,7 @@ func initScheme() *runtime.Scheme { scheme := runtime.NewScheme() framework.TryAddDefaultSchemes(scheme) Expect(infrav1.AddToScheme(scheme)).To(Succeed()) + Expect(infrav1exp.AddToScheme(scheme)).To(Succeed()) return scheme }