diff --git a/Makefile b/Makefile
index 74a1612b..b1de8246 100644
--- a/Makefile
+++ b/Makefile
@@ -99,7 +99,8 @@ GINKGO_NODES ?= 1 # GINKGO_NODES is the number of parallel nodes to run
 GINKGO_TIMEOUT ?= 2h
 GINKGO_POLL_PROGRESS_AFTER ?= 60m
 GINKGO_POLL_PROGRESS_INTERVAL ?= 5m
-E2E_CONF_FILE ?= $(TEST_DIR)/e2e/config/ck8s-docker.yaml
+E2E_INFRA ?= docker
+E2E_CONF_FILE ?= $(TEST_DIR)/e2e/config/ck8s-$(E2E_INFRA).yaml
 SKIP_RESOURCE_CLEANUP ?= false
 USE_EXISTING_CLUSTER ?= false
 GINKGO_NOCOLOR ?= false
diff --git a/test/e2e/README.md b/test/e2e/README.md
index 849be79c..5c617bf5 100644
--- a/test/e2e/README.md
+++ b/test/e2e/README.md
@@ -21,6 +21,73 @@
 To run a specific e2e test, such as `[PR-Blocking]`, use the `GINKGO_FOCUS` environment variable:
 
 ```shell
 make GINKGO_FOCUS="\\[PR-Blocking\\]" test-e2e # only run e2e test with `[PR-Blocking]` in its spec name
 ```
 
+### Use an existing cluster as the management cluster
+
+This is useful if you want to use a cluster managed by Tilt.
+
+```shell
+make USE_EXISTING_CLUSTER=true test-e2e
+```
+
+### Run e2e tests on AWS
+
+To run the tests on AWS, you will need to set the `AWS_B64ENCODED_CREDENTIALS` environment variable.
+
+Then, you can run:
+
+```shell
+make E2E_INFRA=aws test-e2e
+```
+
+Note: The remediation tests currently do not pass on cloud providers. We recommend excluding them from your test runs.
+
+For more information, please refer to the following:
+
+[Kubernetes Slack Discussion](https://kubernetes.slack.com/archives/C8TSNPY4T/p1680525266510109)
+
+[GitHub Issue #4198](https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/4198)
+
+### Running the tests with Tilt
+
+This section explains how to run the e2e tests on AWS using a management cluster run by Tilt.
+It assumes you have *kind* and *Docker* installed (see [Prerequisites](https://cluster-api.sigs.k8s.io/developer/tilt#prerequisites)).
+
+First, clone the upstream cluster-api and cluster-api-provider-aws repositories.
+
+```shell
+git clone https://github.com/kubernetes-sigs/cluster-api.git
+git clone https://github.com/kubernetes-sigs/cluster-api-provider-aws.git
+```
+
+Next, create a `tilt-settings.yaml` file inside the `cluster-api` directory.
+Any `kustomize_substitutions` you provide here are automatically applied to the *management cluster*.
+
+```yaml
+default_registry: "ghcr.io/canonical/cluster-api-k8s"
+provider_repos:
+- ../cluster-api-k8s
+- ../cluster-api-provider-aws
+enable_providers:
+- aws
+- ck8s-bootstrap
+- ck8s-control-plane
+```
+
+Tilt knows how to run the AWS provider controllers because the `cluster-api-provider-aws` repository has a `tilt-provider.yaml` file at its root. Canonical Kubernetes provides the same file at the root of this repository; the provider names `ck8s-bootstrap` and `ck8s-control-plane` are defined there.
+
+Next, customize the variables that will be substituted into the cluster templates applied by the tests (these live under `test/e2e/data/infrastructure-aws`). The variables are defined in `test/e2e/config/ck8s-aws.yaml` under the `variables` key.
+
+Finally, in one terminal, go into the `cluster-api` directory and run `make tilt-up`. You should see a kind cluster being created, followed by a message indicating that Tilt is available at a local address.
+
+In a second terminal in the `cluster-api-k8s` directory, run `make USE_EXISTING_CLUSTER=true test-e2e`.
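+
+If the AWS provider controllers fail to come up under Tilt, check that `AWS_B64ENCODED_CREDENTIALS` is exported in the terminal running `make tilt-up`. A minimal sketch, assuming you have `clusterawsadm` installed and an AWS profile configured:
+
+```shell
+# Encode the active AWS profile's credentials in the format CAPA expects
+export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile)
+make tilt-up
+```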
+
+### Cleaning up after an e2e test
+
+The test framework tries its best to clean up resources after a test suite, but it is possible that
+cloud resources are left over. This can be especially problematic if you run the tests multiple times
+while iterating on development (see [Cluster API Book - Tear down](https://cluster-api.sigs.k8s.io/developer/e2e#tear-down)).
+
+You can use a tool like [aws-nuke](https://github.com/ekristen/aws-nuke) to clean up your AWS account after a test.
+
 ## Develop an e2e test
 
 Refer to [Developing E2E tests](https://cluster-api.sigs.k8s.io/developer/e2e) for a complete guide for developing e2e tests.
diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go
index 80dc7052..5b9841c1 100644
--- a/test/e2e/cluster_upgrade_test.go
+++ b/test/e2e/cluster_upgrade_test.go
@@ -22,6 +22,7 @@ package e2e
 import (
 	. "github.com/onsi/ginkgo/v2"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 )
 
 var _ = Describe("Workload cluster upgrade [CK8s-Upgrade]", func() {
@@ -33,7 +34,7 @@ var _ = Describe("Workload cluster upgrade [CK8s-Upgrade]", func() {
 			BootstrapClusterProxy:   bootstrapClusterProxy,
 			ArtifactFolder:          artifactFolder,
 			SkipCleanup:             skipCleanup,
-			InfrastructureProvider:   ptr.To("docker"),
+			InfrastructureProvider:   ptr.To(clusterctl.DefaultInfrastructureProvider),
 			ControlPlaneMachineCount: ptr.To[int64](3),
 			WorkerMachineCount:       ptr.To[int64](1),
 		}
diff --git a/test/e2e/config/ck8s-aws.yaml b/test/e2e/config/ck8s-aws.yaml
new file mode 100644
index 00000000..495b0d20
--- /dev/null
+++ b/test/e2e/config/ck8s-aws.yaml
@@ -0,0 +1,128 @@
+---
+managementClusterName: capi-test
+
+# E2E test scenario using local dev images and manifests built from the source tree for the following providers:
+# - bootstrap ck8s
+# - control-plane ck8s
+images:
+  # Use local dev images built from the source tree.
+  - name: ghcr.io/canonical/cluster-api-k8s/controlplane-controller:dev
+    loadBehavior: mustLoad
+  - name: ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:dev
+    loadBehavior: mustLoad
+
+# List of providers that will be installed into the management cluster.
+# See the InitManagementClusterAndWatchControllerLogs function call.
+providers:
+  - name: cluster-api
+    type: CoreProvider
+    versions:
+      - name: v1.8.4
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.4/core-components.yaml
+        type: url
+        contract: v1beta1
+        files:
+          - sourcePath: "../data/shared/v1beta1/metadata.yaml"
+        replacements:
+          - old: "imagePullPolicy: Always"
+            new: "imagePullPolicy: IfNotPresent"
+  - name: aws
+    type: InfrastructureProvider
+    versions:
+      - name: v2.6.1
+        value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.6.1/infrastructure-components.yaml"
+        type: url
+        contract: v1beta1
+        files:
+          - sourcePath: "../data/shared/v1beta1_aws/metadata.yaml"
+        replacements:
+          - old: "imagePullPolicy: Always"
+            new: "imagePullPolicy: IfNotPresent"
+      # When bootstrapping with Tilt, it will use
+      # https://github.com/kubernetes-sigs/cluster-api/blob/main/hack/tools/internal/tilt-prepare/main.go;
+      # the name here should match defaultProviderVersion.
+      - name: v1.9.99
+        value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.6.1/infrastructure-components.yaml"
+        type: url
+        contract: v1beta1
+        files:
+          - sourcePath: "../data/shared/v1beta1_aws/metadata.yaml"
+        replacements:
+          - old: "imagePullPolicy: Always"
+            new: "imagePullPolicy: IfNotPresent"
+    files:
"../data/infrastructure-aws/cluster-template.yaml" + - name: ck8s + type: BootstrapProvider + versions: + # Could add older release version for upgrading test, but + # by default, will only use the latest version defined in + # ${ProjectRoot}/metadata.yaml to init the management cluster + # this version should be updated when ${ProjectRoot}/metadata.yaml + # is modified + - name: v0.1.99 # next; use manifest from source files + value: "../../../bootstrap/config/default" + replacements: + - old: "ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:latest" + new: "ghcr.io/canonical/cluster-api-k8s/bootstrap-controller:dev" + files: + - sourcePath: "../../../metadata.yaml" + - name: ck8s + type: ControlPlaneProvider + versions: + - name: v0.1.99 # next; use manifest from source files + value: "../../../controlplane/config/default" + replacements: + - old: "ghcr.io/canonical/cluster-api-k8s/controlplane-controller:latest" + new: "ghcr.io/canonical/cluster-api-k8s/controlplane-controller:dev" + files: + - sourcePath: "../../../metadata.yaml" + +# These variables replace the variables in test/e2e/data/infrastructure-aws manifests +# They are used during clusterctl generate cluster +variables: + KUBERNETES_VERSION_MANAGEMENT: "v1.30.0" + KUBERNETES_VERSION: "v1.30.0" + KUBERNETES_VERSION_UPGRADE_TO: "v1.30.1" + IP_FAMILY: "IPv4" + KIND_IMAGE_VERSION: "v1.30.0" + AWS_CONTROL_PLANE_INSTANCE_TYPE: t3.large + AWS_NODE_INSTANCE_TYPE: t3.large + AWS_PUBLIC_IP: true + AWS_CREATE_BASTION: true + AWS_SSH_KEY_NAME: "default" + AWS_AMI_ID: "ami-01b139e6226d65e4f" + AWS_CONTROL_PLANE_ROOT_VOLUME_SIZE: 16 + AWS_NODE_ROOT_VOLUME_SIZE: 16 + AWS_REGION: "us-east-2" + AWS_CCM_IMAGE: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.3" + # https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/test/e2e/data/e2e_conf.yaml#L203C1-L205C27 + # There is some work to be done here on figuring out which experimental features + # we want to enable/disable. 
+  EXP_CLUSTER_RESOURCE_SET: "true"
+  EXP_MACHINE_SET_PREFLIGHT_CHECKS: "false"
+  CLUSTER_TOPOLOGY: "true"
+  CAPA_LOGLEVEL: "4"
+
+intervals:
+  # Ref: https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/test/e2e/data/e2e_conf.yaml
+  default/wait-machines: [ "35m", "10s" ]
+  default/wait-cluster: [ "35m", "10s" ]
+  default/wait-control-plane: [ "35m", "10s" ]
+  default/wait-worker-nodes: [ "35m", "10s" ]
+  conformance/wait-control-plane: [ "35m", "10s" ]
+  conformance/wait-worker-nodes: [ "35m", "10s" ]
+  default/wait-controllers: [ "35m", "10s" ]
+  default/wait-delete-cluster: [ "35m", "10s" ]
+  default/wait-machine-upgrade: [ "35m", "10s" ]
+  default/wait-contolplane-upgrade: [ "35m", "10s" ]
+  default/wait-machine-status: [ "35m", "10s" ]
+  default/wait-failed-machine-status: [ "35m", "10s" ]
+  default/wait-infra-subnets: [ "5m", "30s" ]
+  default/wait-machine-pool-nodes: [ "35m", "10s" ]
+  default/wait-machine-pool-upgrade: [ "35m", "10s" ]
+  default/wait-create-identity: [ "3m", "10s" ]
+  default/wait-job: [ "35m", "10s" ]
+  default/wait-deployment-ready: [ "35m", "10s" ]
+  default/wait-loadbalancer-ready: [ "5m", "30s" ]
diff --git a/test/e2e/config/ck8s-docker.yaml b/test/e2e/config/ck8s-docker.yaml
index 8dba00e0..a5175491 100644
--- a/test/e2e/config/ck8s-docker.yaml
+++ b/test/e2e/config/ck8s-docker.yaml
@@ -15,8 +15,8 @@ providers:
   - name: cluster-api
     type: CoreProvider
     versions:
-      - name: v1.6.2
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/core-components.yaml
+      - name: v1.8.4
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.4/core-components.yaml
         type: url
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
@@ -28,8 +28,8 @@ providers:
     versions:
       # By default, will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
      # to init the management cluster
-      - name: v1.6.2 # used during e2e-test
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/infrastructure-components-development.yaml
+      - name: v1.8.4 # used during e2e-test
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.4/infrastructure-components-development.yaml
         type: url
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 81dd2b8f..41c0e762 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -48,7 +48,7 @@ var _ = Describe("Workload cluster creation", func() {
 		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))
 
 		clusterName = fmt.Sprintf("capick8s-create-%s", util.RandomString(6))
-		infrastructureProvider = "docker"
+		infrastructureProvider = clusterctl.DefaultInfrastructureProvider
 
 		// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
 		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
diff --git a/test/e2e/data/infrastructure-aws/cluster-template.yaml b/test/e2e/data/infrastructure-aws/cluster-template.yaml
new file mode 100644
index 00000000..e0a96714
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/cluster-template.yaml
@@ -0,0 +1,315 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: ${CLUSTER_NAME}
+  labels:
+    ccm: external
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+        - 10.1.0.0/16
+    services:
+      cidrBlocks:
+        - 10.152.183.0/24
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+    kind: CK8sControlPlane
+    name: ${CLUSTER_NAME}-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+    kind: AWSCluster
+    name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+  name: ${CLUSTER_NAME}
+spec:
+  region: ${AWS_REGION}
+  sshKeyName: ${AWS_SSH_KEY_NAME}
+  bastion:
+    enabled: ${AWS_CREATE_BASTION}
+  controlPlaneLoadBalancer:
+    healthCheckProtocol: TCP
+  network:
+    cni:
+      cniIngressRules:
+        - description: microcluster
+          protocol: tcp
+          toPort: 2380
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: CK8sControlPlane
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+  namespace: default
+spec:
+  machineTemplate:
+    infrastructureTemplate:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+      kind: AWSMachineTemplate
+      name: ${CLUSTER_NAME}-control-plane
+  spec:
+    nodeName: "{{ ds.meta_data.local_hostname }}"
+    channel: "${KUBERNETES_VERSION}-classic/edge"
+    controlPlane:
+      cloudProvider: external
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+spec:
+  template:
+    spec:
+      ami:
+        id: ${AWS_AMI_ID}
+      iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+      instanceType: ${AWS_CONTROL_PLANE_INSTANCE_TYPE}
+      publicIP: ${AWS_PUBLIC_IP}
+      sshKeyName: ${AWS_SSH_KEY_NAME}
+      rootVolume:
+        size: ${AWS_CONTROL_PLANE_ROOT_VOLUME_SIZE}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: ${CLUSTER_NAME}-worker-md-0
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels:
+      cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+  template:
+    spec:
+      version: ${KUBERNETES_VERSION}
+      clusterName: ${CLUSTER_NAME}
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+          kind: CK8sConfigTemplate
+          name: ${CLUSTER_NAME}-md-0
+      infrastructureRef:
+        name: "${CLUSTER_NAME}-md-0"
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+        kind: AWSMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      ami:
+        id: ${AWS_AMI_ID}
+      iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+      instanceType: ${AWS_NODE_INSTANCE_TYPE}
+      publicIP: ${AWS_PUBLIC_IP}
+      sshKeyName: ${AWS_SSH_KEY_NAME}
+      rootVolume:
+        size: ${AWS_NODE_ROOT_VOLUME_SIZE}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+kind: CK8sConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      nodeName: "{{ ds.meta_data.local_hostname }}"
+      channel: "${KUBERNETES_VERSION}-classic/edge"
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: crs-ccm
+spec:
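+  # clusterSelector matches the `ccm: external` label set on the Cluster above,
+  # so this ClusterResourceSet applies the cloud-controller-manager ConfigMap
+  # to every workload cluster carrying that label.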
+  clusterSelector:
+    matchLabels:
+      ccm: external
+  resources:
+    - kind: ConfigMap
+      name: cloud-controller-manager-addon
+  strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+  aws-ccm-external.yaml: |
+    ---
+    apiVersion: apps/v1
+    kind: DaemonSet
+    metadata:
+      name: aws-cloud-controller-manager
+      namespace: kube-system
+      labels:
+        k8s-app: aws-cloud-controller-manager
+    spec:
+      selector:
+        matchLabels:
+          k8s-app: aws-cloud-controller-manager
+      updateStrategy:
+        type: RollingUpdate
+      template:
+        metadata:
+          labels:
+            k8s-app: aws-cloud-controller-manager
+        spec:
+          nodeSelector:
+            node-role.kubernetes.io/control-plane: ""
+          tolerations:
+            - key: node.cloudprovider.kubernetes.io/uninitialized
+              value: "true"
+              effect: NoSchedule
+            - effect: NoSchedule
+              key: node-role.kubernetes.io/control-plane
+          affinity:
+            nodeAffinity:
+              requiredDuringSchedulingIgnoredDuringExecution:
+                nodeSelectorTerms:
+                  - matchExpressions:
+                      - key: node-role.kubernetes.io/control-plane
+                        operator: Exists
+          serviceAccountName: cloud-controller-manager
+          containers:
+            - name: aws-cloud-controller-manager
+              image: ${AWS_CCM_IMAGE}
+              args:
+                - --v=2
+                - --cloud-provider=aws
+                - --use-service-account-credentials=true
+                - --configure-cloud-routes=false
+              resources:
+                requests:
+                  cpu: 200m
+          hostNetwork: true
+    ---
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: cloud-controller-manager
+      namespace: kube-system
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: cloud-controller-manager:apiserver-authentication-reader
+      namespace: kube-system
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: Role
+      name: extension-apiserver-authentication-reader
+    subjects:
+      - apiGroup: ""
+        kind: ServiceAccount
+        name: cloud-controller-manager
+        namespace: kube-system
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: system:cloud-controller-manager
+    rules:
+      - apiGroups:
+          - ""
+        resources:
+          - events
+        verbs:
+          - create
+          - patch
+          - update
+      - apiGroups:
+          - ""
+        resources:
+          - nodes
+        verbs:
+          - '*'
+      - apiGroups:
+          - ""
+        resources:
+          - nodes/status
+        verbs:
+          - patch
+      - apiGroups:
+          - ""
+        resources:
+          - services
+        verbs:
+          - list
+          - patch
+          - update
+          - watch
+      - apiGroups:
+          - ""
+        resources:
+          - services/status
+        verbs:
+          - list
+          - patch
+          - update
+          - watch
+      - apiGroups:
+          - ""
+        resources:
+          - serviceaccounts
+        verbs:
+          - create
+          - get
+          - list
+          - watch
+      - apiGroups:
+          - ""
+        resources:
+          - persistentvolumes
+        verbs:
+          - get
+          - list
+          - update
+          - watch
+      - apiGroups:
+          - ""
+        resources:
+          - endpoints
+        verbs:
+          - create
+          - get
+          - list
+          - watch
+          - update
+      - apiGroups:
+          - coordination.k8s.io
+        resources:
+          - leases
+        verbs:
+          - create
+          - get
+          - list
+          - watch
+          - update
+      - apiGroups:
+          - ""
+        resources:
+          - serviceaccounts/token
+        verbs:
+          - create
+    ---
+    kind: ClusterRoleBinding
+    apiVersion: rbac.authorization.k8s.io/v1
+    metadata:
+      name: system:cloud-controller-manager
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: ClusterRole
+      name: system:cloud-controller-manager
+    subjects:
+      - apiGroup: ""
+        kind: ServiceAccount
+        name: cloud-controller-manager
+        namespace: kube-system
+kind: ConfigMap
+metadata:
+  name: cloud-controller-manager-addon
diff --git a/test/e2e/data/shared/v1beta1/metadata.yaml b/test/e2e/data/shared/v1beta1/metadata.yaml
index 92b9968a..b7d38077 100644
--- a/test/e2e/data/shared/v1beta1/metadata.yaml
+++ b/test/e2e/data/shared/v1beta1/metadata.yaml
@@ -1,9 +1,12 @@
-# maps release series of major.minor to cluster-api contract version,
-# update this file only when you update the version for cluster-api
-# CoreProvider and docker InfrastructureProvider in test/e2e/config/k3s-docker.yaml
 apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
+  - major: 1
+    minor: 8
+    contract: v1beta1
+  - major: 1
+    minor: 7
+    contract: v1beta1
   - major: 1
     minor: 6
     contract: v1beta1
diff --git a/test/e2e/data/shared/v1beta1_aws/metadata.yaml b/test/e2e/data/shared/v1beta1_aws/metadata.yaml
new file mode 100644
index 00000000..8e288cef
--- /dev/null
+++ b/test/e2e/data/shared/v1beta1_aws/metadata.yaml
@@ -0,0 +1,6 @@
+apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
+kind: Metadata
+releaseSeries:
+  - major: 2
+    minor: 6
+    contract: v1beta1
diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go
index 23a521b7..4f3284d1 100644
--- a/test/e2e/helpers.go
+++ b/test/e2e/helpers.go
@@ -525,7 +525,7 @@ type WaitForControlPlaneAndMachinesReadyInput struct {
 	ControlPlane *controlplanev1.CK8sControlPlane
 }
 
-// WaitForControlPlaneAndMachinesReady waits for a KThreeControlPlane object to be ready (all the machine provisioned and one node ready).
+// WaitForControlPlaneAndMachinesReady waits for a CK8sControlPlane object to be ready (all machines provisioned and one node ready).
 func WaitForControlPlaneAndMachinesReady(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneReady")
 	Expect(input.GetLister).ToNot(BeNil(), "Invalid argument. input.GetLister can't be nil when calling WaitForControlPlaneReady")
diff --git a/test/e2e/kcp_remediation_test.go b/test/e2e/kcp_remediation_test.go
index 881f1907..3c3b220e 100644
--- a/test/e2e/kcp_remediation_test.go
+++ b/test/e2e/kcp_remediation_test.go
@@ -23,9 +23,16 @@ import (
"github.com/onsi/ginkgo/v2" "k8s.io/utils/ptr" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) var _ = Describe("When testing KCP remediation", func() { + // See kubernetes.slack.com/archives/C8TSNPY4T/p1680525266510109 + // And github.com/kubernetes-sigs/cluster-api-provider-aws/issues/4198 + if clusterctl.DefaultInfrastructureProvider == "aws" { + Skip("Skipping KCP remediation test for AWS") + } + capi_e2e.KCPRemediationSpec(ctx, func() capi_e2e.KCPRemediationSpecInput { return capi_e2e.KCPRemediationSpecInput{ E2EConfig: e2eConfig, @@ -33,6 +40,7 @@ var _ = Describe("When testing KCP remediation", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: ptr.To("docker")} + InfrastructureProvider: ptr.To(clusterctl.DefaultInfrastructureProvider), + } }) }) diff --git a/test/e2e/md_remediation_test.go b/test/e2e/md_remediation_test.go index 4f707ba2..5f40620e 100644 --- a/test/e2e/md_remediation_test.go +++ b/test/e2e/md_remediation_test.go @@ -34,6 +34,12 @@ import ( ) var _ = Describe("When testing MachineDeployment remediation", func() { + // See kubernetes.slack.com/archives/C8TSNPY4T/p1680525266510109 + // And github.com/kubernetes-sigs/cluster-api-provider-aws/issues/4198 + if clusterctl.DefaultInfrastructureProvider == "aws" { + Skip("Skipping KCP remediation test for AWS") + } + var ( ctx = context.TODO() specName = "machine-deployment-remediation" @@ -49,7 +55,7 @@ var _ = Describe("When testing MachineDeployment remediation", func() { Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion)) clusterName = fmt.Sprintf("capick8s-md-remediation-%s", util.RandomString(6)) - infrastructureProvider = "docker" + infrastructureProvider = clusterctl.DefaultInfrastructureProvider // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) diff --git a/test/e2e/node_scale_test.go b/test/e2e/node_scale_test.go index e295b450..4652e2f5 100644 --- a/test/e2e/node_scale_test.go +++ b/test/e2e/node_scale_test.go @@ -48,7 +48,7 @@ var _ = Describe("Workload cluster scaling", func() { Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion)) clusterName = fmt.Sprintf("capick8s-node-scale-%s", util.RandomString(6)) - infrastructureProvider = "docker" + infrastructureProvider = clusterctl.DefaultInfrastructureProvider // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) diff --git a/tilt-provider.yaml b/tilt-provider.yaml new file mode 100644 index 00000000..d715c79b --- /dev/null +++ b/tilt-provider.yaml @@ -0,0 +1,24 @@ +- name: ck8s-bootstrap + config: + context: bootstrap + image: ghcr.io/canonical/cluster-api-k8s/bootstrap-controller + live_reload_deps: + - main.go + - api + - controllers + - ../go.mod + - ../go.sum + - ../pkg + label: CABPCK +- name: ck8s-control-plane + config: + context: controlplane + image: ghcr.io/canonical/cluster-api-k8s/controlplane-controller + live_reload_deps: + - main.go + - api + - controllers + - ../go.mod + - ../go.sum + - ../pkg + label: CACPPCK