Skip to content

Commit

Permalink
Add Github action
Browse files Browse the repository at this point in the history
  • Loading branch information
eaudetcobello committed Sep 4, 2024
1 parent 45b5e97 commit bfb2a70
Show file tree
Hide file tree
Showing 3 changed files with 119 additions and 40 deletions.
40 changes: 40 additions & 0 deletions .github/workflows/e2e-deleteme.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
name: E2E Tests

on:
  pull_request:

permissions:
  contents: read

jobs:
  run-e2e-tests:
    name: Run E2E Tests
    runs-on: [self-hosted, linux, X64, jammy, large]
    env:
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
      AWS_REGION: us-east-2
    strategy:
      matrix:
        # NOTE(review): the matrix value is not currently consumed by the
        # "Run e2e tests" step (the focus is hardcoded in the script) —
        # confirm whether it should be passed through as GINKGO_FOCUS.
        ginkgo_focus:
          # - "KCP remediation"
          # - "MachineDeployment remediation"
          - "Workload cluster creation"
          # - "Workload cluster scaling"
          # - "Workload cluster upgrade"
    steps:
      - name: Check out repo
        uses: actions/checkout@v4
      - name: Install requirements
        run: |
          # -y is required: apt cannot prompt in a non-interactive CI shell.
          sudo apt install -y make
          sudo apt install -y wget
      - name: Increase inotify watches
        run: |
          # Prevents https://cluster-api.sigs.k8s.io/user/troubleshooting#cluster-api-with-docker----too-many-open-files
          sudo sysctl fs.inotify.max_user_watches=1048576
          sudo sysctl fs.inotify.max_user_instances=8192
      - name: Run e2e tests
        run: |
          # args: SKIP_CLEANUP INFRA_PROVIDER CK8S_PROVIDER_VERSION
          sudo ./hack/ci-e2e-tests.sh true aws v0.1.2
2 changes: 2 additions & 0 deletions .github/workflows/e2e.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ name: E2E Tests

on:
pull_request:
branches:
- does-not-exist

permissions:
contents: read
Expand Down
117 changes: 77 additions & 40 deletions hack/ci-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,27 @@
set -xe

readonly HACK_DIR="$(realpath $(dirname "${0}"))"

cd "$HACK_DIR"

readonly SKIP_CLEANUP=${1:-true}
readonly
readonly INFRA_PROVIDER=${2:-aws}
readonly CK8S_PROVIDER_VERSION=${3:-v0.1.2}

readonly LXD_CHANNEL="5.21/stable"
readonly LXC_IMAGE="ubuntu:20.04"
readonly K8S_PROFILE_URL="https://github.com/raw/canonical/k8s-snap/main/tests/integration/lxd-profile.yaml"
readonly K8S_PROFILE_PATH="/tmp/k8s.profile"
readonly CONTAINER_NAME="k8s-test"

# User specific paths
readonly USER_CREDENTIALS_PATH="/home/user/.creds"
# Utility function for printing errors to stderr
# Print an error message to stderr and terminate the script with a non-zero
# status. Usage: error_exit "message"
# The original only `return`ed 1, which relied on the caller (or `set -e`)
# to actually stop the script; an explicit exit matches the function's name
# and intent.
function error_exit {
  printf "ERROR: %s\n" "$1" >&2
  exit 1
}

# Run an arbitrary shell command inside the test container.
# $1 - command string, executed with `bash -c` inside $CONTAINER_NAME.
# $CONTAINER_NAME is quoted to survive any future value containing
# word-splitting characters.
function exec_in_container {
  lxc exec "$CONTAINER_NAME" -- bash -c "$1"
}

# Install LXD snap
function install_lxd {
Expand All @@ -35,71 +43,100 @@ function setup_lxd_profile {
# Launch and configure the LXD container that hosts the management cluster.
# Depends on: LXC_IMAGE, CONTAINER_NAME, INFRA_PROVIDER and (for aws) the
# AWS_* environment variables.
function setup_container {
  lxc launch $LXC_IMAGE $CONTAINER_NAME -p default -p k8s

  # Poll until the container runtime accepts commands instead of sleeping
  # for a fixed interval (the diff residue duplicated the old sleep-based
  # bootstrap; only the polling variant is kept).
  until exec_in_container true; do
    sleep 1
  done

  if [[ $INFRA_PROVIDER == "aws" ]]; then
    # Disable xtrace while handling credentials so they are not echoed
    # into the CI log.
    set +x
    lxc config set $CONTAINER_NAME environment.AWS_REGION "$AWS_REGION"
    lxc config set $CONTAINER_NAME environment.AWS_SECRET_ACCESS_KEY "$AWS_SECRET_ACCESS_KEY"
    lxc config set $CONTAINER_NAME environment.AWS_ACCESS_KEY_ID "$AWS_ACCESS_KEY_ID"
    set -x
  fi

  exec_in_container "apt update && apt install -y snapd"
  exec_in_container "systemctl start snapd"
}

# Install Canonical Kubernetes inside the container and bootstrap it as the
# CAPI management cluster (kubeconfig written to /root/.kube/config).
# The interleaved old `lxc exec` duplicates from the diff are removed; only
# the exec_in_container variants are kept.
function setup_management_cluster {
  exec_in_container "snap install k8s --classic --edge"
  # Brief pause for snapd to settle before installing the next snap.
  sleep 1
  exec_in_container "snap install go --classic"
  exec_in_container "mkdir -p /root/.kube"
  exec_in_container "sudo k8s bootstrap"
  exec_in_container "sudo k8s status --wait-ready"
  exec_in_container "sudo k8s config > /root/.kube/config"
}

# Push the tool installer scripts into the container and run them.
# The AWS-specific installers are only pushed for the aws provider.
# Also pushes the user's credentials file and sources it from the
# container root's .bashrc.
# (The stale pre-refactor loop with a hardcoded script list and hardcoded
# v0.1.2 config call is removed.)
function install_tools {
  local tools=(install-clusterctl.sh)

  if [[ $INFRA_PROVIDER == "aws" ]]; then
    tools+=(install-clusterctlawsadm.sh install-aws-nuke.sh)
  fi

  lxc file push "$USER_CREDENTIALS_PATH" $CONTAINER_NAME/root/.creds
  lxc exec $CONTAINER_NAME -- bash -c 'echo "source /root/.creds" >/root/.bashrc'

  for script in "${tools[@]}"; do
    lxc file push ./"$script" $CONTAINER_NAME/root/"$script"
    exec_in_container "chmod +x /root/$script && /root/$script"
  done
}

# Write the provider config inside the container and run `clusterctl init`
# for the selected infrastructure provider and CK8s provider version.
# (The stale pre-refactor one-liner that hardcoded aws/v0.1.2 is removed.)
function init_clusterctl {
  if [[ $INFRA_PROVIDER == "aws" ]]; then
    # NOTE(review): CloudFormation IAM stack bootstrap is deliberately
    # disabled; re-enable if the stack ever needs (re)creation.
    #exec_in_container "clusterawsadm bootstrap iam create-cloudformation-stack --region $AWS_REGION"
    true
  fi

  lxc file push ./write-provider-config.sh $CONTAINER_NAME/root/write-provider-config.sh
  exec_in_container "chmod +x /root/write-provider-config.sh"
  exec_in_container "mkdir -p /root/.cluster-api"
  exec_in_container "/root/write-provider-config.sh /root/.cluster-api/clusterctl.yaml $CK8S_PROVIDER_VERSION"

  # The credential profile is generated on the host ($(...) expands here)
  # and appended to the container's clusterctl config; xtrace is disabled
  # so the encoded credentials never reach the CI log.
  set +x
  exec_in_container "echo $(clusterawsadm bootstrap credentials encode-as-profile) >> /root/.cluster-api/clusterctl.yaml"
  set -x

  exec_in_container "clusterctl init -i $INFRA_PROVIDER -b ck8s:$CK8S_PROVIDER_VERSION -c ck8s:$CK8S_PROVIDER_VERSION --config /root/.cluster-api/clusterctl.yaml"
}

function write_aws_nuke_config {
lxc exec $CONTAINER_NAME -- bash -c "mkdir -p /root/.aws-nuke"
lxc exec $CONTAINER_NAME -- bash -c "echo ""$AWS_NUKE_CONFIG"" > /root/.aws-nuke/config.yaml"
# Run the e2e suite against the existing management cluster.
# The Ginkgo focus defaults to the workload-cluster-creation suite but can
# be overridden via the GINKGO_FOCUS environment variable, which lets the
# CI matrix's ginkgo_focus value be passed through instead of being ignored.
function run_e2e_tests {
  make USE_EXISTING_CLUSTER=true GINKGO_FOCUS="${GINKGO_FOCUS:-Workload cluster creation}" test-e2e
}

# Tear down cloud resources and the test container unless SKIP_CLEANUP=true.
# (The stale pre-refactor lines calling the removed write_aws_nuke_config
# helper — which ran before the skip check — are removed.)
function cleanup {
  if [[ $SKIP_CLEANUP == "true" ]]; then
    return
  fi

  # Infra-specific cleanup
  if [[ $INFRA_PROVIDER == "aws" ]]; then
    exec_in_container "mkdir -p /root/.aws-nuke"
    # Quoting fix: the original `echo ""$AWS_NUKE_CONFIG""` concatenated two
    # empty strings around an UNQUOTED expansion, word-splitting the config
    # and collapsing its newlines. Escaped quotes keep the host-side
    # expansion intact inside the container's shell.
    # NOTE(review): assumes the config contains no embedded double quotes.
    exec_in_container "echo \"$AWS_NUKE_CONFIG\" > /root/.aws-nuke/config.yaml"
    exec_in_container "aws-nuke --config /root/.aws-nuke/config.yaml --force"
  fi

  lxc delete $CONTAINER_NAME --force
}

# Entry point: validate the provider, then provision the container,
# bootstrap the management cluster, install tooling, init clusterctl,
# run the e2e suite and clean up. cleanup itself honours SKIP_CLEANUP,
# so the old caller-side gating (and its call to the removed
# write_aws_nuke_config) is dropped.
function main {
  if [[ $INFRA_PROVIDER != "aws" ]]; then
    error_exit "Unsupported infrastructure provider: $INFRA_PROVIDER"
    exit 1
  else
    echo "Running e2e tests with $INFRA_PROVIDER provider and CK8s version $CK8S_PROVIDER_VERSION"
    echo "The AWS provider depends on the following environment variables: AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY"
  fi

  install_lxd
  setup_lxd_profile
  setup_container
  setup_management_cluster
  install_tools
  init_clusterctl
  # NOTE(review): the diff shows both an active and a commented-out
  # run_e2e_tests call; the active form is kept — confirm this matches the
  # intended pipeline.
  run_e2e_tests
  cleanup
}

main

0 comments on commit bfb2a70

Please sign in to comment.