From 47dd9cfb48f13bf9643c12cead29dafdf36e7791 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 26 Mar 2024 17:05:16 -0500 Subject: [PATCH 01/62] Initial commit to refactor context usage within k8s code --- src/cmd/connect.go | 13 +- src/cmd/destroy.go | 16 +- src/cmd/dev.go | 7 +- src/cmd/initialize.go | 7 +- src/cmd/internal.go | 24 ++- src/cmd/package.go | 28 +++- src/cmd/tools/crane.go | 25 ++- src/cmd/tools/zarf.go | 26 ++- src/extensions/bigbang/test/bigbang_test.go | 6 +- src/internal/packager/git/gitea.go | 25 +-- src/internal/packager/helm/post-render.go | 28 ++-- src/internal/packager/helm/zarf.go | 26 ++- src/internal/packager/images/push.go | 5 +- src/pkg/cluster/common.go | 13 +- src/pkg/cluster/data.go | 7 +- src/pkg/cluster/injector.go | 96 ++++++----- src/pkg/cluster/namespace.go | 4 +- src/pkg/cluster/secrets.go | 17 +- src/pkg/cluster/state.go | 27 +-- src/pkg/cluster/tunnel.go | 27 +-- src/pkg/cluster/zarf.go | 73 ++++---- src/pkg/k8s/common.go | 31 +--- src/pkg/k8s/configmap.go | 19 +-- src/pkg/k8s/dynamic.go | 14 +- src/pkg/k8s/hpa.go | 16 +- src/pkg/k8s/info.go | 11 +- src/pkg/k8s/namespace.go | 29 ++-- src/pkg/k8s/nodes.go | 8 +- src/pkg/k8s/pods.go | 175 ++++++++++---------- src/pkg/k8s/sa.go | 31 ++-- src/pkg/k8s/secrets.go | 24 +-- src/pkg/k8s/services.go | 34 ++-- src/pkg/k8s/tunnel.go | 41 +++-- src/pkg/packager/common.go | 20 +-- src/pkg/packager/common_test.go | 3 +- src/pkg/packager/deploy.go | 76 ++++----- src/pkg/packager/dev.go | 7 +- src/pkg/packager/mirror.go | 11 +- src/pkg/packager/remove.go | 23 +-- src/pkg/packager/sources/cluster.go | 13 +- src/test/e2e/21_connect_creds_test.go | 15 +- src/test/e2e/22_git_and_gitops_test.go | 16 +- src/test/e2e/23_data_injection_test.go | 2 +- src/test/e2e/26_simple_packages_test.go | 3 +- src/test/e2e/99_yolo_test.go | 3 +- src/test/external/ext_in_cluster_test.go | 6 +- src/test/external/ext_out_cluster_test.go | 6 +- 47 files changed, 621 insertions(+), 516 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index 39b382cad3..991470ee89 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -5,10 +5,12 @@ package cmd import ( + "context" "fmt" "os" "os/signal" "syscall" + "time" "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/config/lang" @@ -43,12 +45,15 @@ var ( spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error()) } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + var tunnel *k8s.Tunnel if connectResourceName != "" { zt := cluster.NewTunnelInfo(connectNamespace, connectResourceType, connectResourceName, "", connectLocalPort, connectRemotePort) - tunnel, err = c.ConnectTunnelInfo(zt) + tunnel, err = c.ConnectTunnelInfo(ctx, zt) } else { - tunnel, err = c.Connect(target) + tunnel, err = c.Connect(ctx, target) } if err != nil { spinner.Fatalf(err, lang.CmdConnectErrService, err.Error()) @@ -91,7 +96,9 @@ var ( Aliases: []string{"l"}, Short: lang.CmdConnectListShort, Run: func(_ *cobra.Command, _ []string) { - cluster.NewClusterOrDie().PrintConnectTable() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + cluster.NewClusterOrDie(ctx).PrintConnectTable(ctx) }, } ) diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index bc9dbe7d61..56784b2df5 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -5,9 +5,11 @@ package cmd import ( + "context" "errors" "os" "regexp" + "time" "github.com/defenseunicorns/pkg/helpers" 
"github.com/defenseunicorns/zarf/src/config" @@ -29,7 +31,13 @@ var destroyCmd = &cobra.Command{ Short: lang.CmdDestroyShort, Long: lang.CmdDestroyLong, Run: func(_ *cobra.Command, _ []string) { - c, err := cluster.NewClusterWithWait(cluster.DefaultTimeout) + ctxLong, cancelLong := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancelLong() + + ctxShort, cancelShort := context.WithTimeout(context.Background(), 30*time.Second) + defer cancelShort() + + c, err := cluster.NewClusterWithWait(ctxShort) if err != nil { message.Fatalf(err, lang.ErrNoClusterConnection) } @@ -37,7 +45,7 @@ var destroyCmd = &cobra.Command{ // NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig) // there will be no zarf-state to load and the struct will be empty. In these cases, if we can find // the scripts to remove k3s, we will still try to remove a locally installed k3s cluster - state, err := c.LoadZarfState() + state, err := c.LoadZarfState(ctxShort) if err != nil { message.WarnErr(err, lang.ErrLoadState) } @@ -74,10 +82,10 @@ var destroyCmd = &cobra.Command{ helm.Destroy(removeComponents) // If Zarf didn't deploy the cluster, only delete the ZarfNamespace - c.DeleteZarfNamespace() + c.DeleteZarfNamespace(ctxLong) // Remove zarf agent labels and secrets from namespaces Zarf doesn't manage - c.StripZarfLabelsAndSecretsFromNamespaces() + c.StripZarfLabelsAndSecretsFromNamespaces(ctxLong) } }, } diff --git a/src/cmd/dev.go b/src/cmd/dev.go index 92a46e3bcf..c3ed6f5543 100644 --- a/src/cmd/dev.go +++ b/src/cmd/dev.go @@ -5,11 +5,13 @@ package cmd import ( + "context" "fmt" "io" "os" "path/filepath" "strings" + "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/pkg/helpers" @@ -54,8 +56,11 @@ var devDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + // Create the package - if err := pkgClient.DevDeploy(); err != nil { + if err := pkgClient.DevDeploy(ctx); err != nil { message.Fatalf(err, lang.CmdDevDeployErr, err.Error()) } }, diff --git a/src/cmd/initialize.go b/src/cmd/initialize.go index cdb6ad05f6..8c829a632c 100644 --- a/src/cmd/initialize.go +++ b/src/cmd/initialize.go @@ -5,12 +5,14 @@ package cmd import ( + "context" "errors" "fmt" "os" "path" "path/filepath" "strings" + "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/pkg/helpers" @@ -67,8 +69,11 @@ var initCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + // Deploy everything - err = pkgClient.Deploy() + err = pkgClient.Deploy(ctx) if err != nil { message.Fatal(err, err.Error()) } diff --git a/src/cmd/internal.go b/src/cmd/internal.go index 213385e8e3..96078d5c70 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -5,9 +5,11 @@ package cmd import ( + "context" "encoding/json" "fmt" "os" + "time" "github.com/alecthomas/jsonschema" "github.com/defenseunicorns/pkg/helpers" @@ -161,14 +163,17 @@ var createReadOnlyGiteaUser = &cobra.Command{ Short: lang.CmdInternalCreateReadOnlyGiteaUserShort, Long: lang.CmdInternalCreateReadOnlyGiteaUserLong, Run: func(_ *cobra.Command, _ []string) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Load the state so we can get the credentials for the admin 
git user - state, err := cluster.NewClusterOrDie().LoadZarfState() + state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } // Create the non-admin user - if err = git.New(state.GitServer).CreateReadOnlyUser(); err != nil { + if err = git.New(state.GitServer).CreateReadOnlyUser(ctx); err != nil { message.WarnErr(err, lang.CmdInternalCreateReadOnlyGiteaUserErr) } }, @@ -179,23 +184,26 @@ var createPackageRegistryToken = &cobra.Command{ Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, Run: func(_ *cobra.Command, _ []string) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Load the state so we can get the credentials for the admin git user - c := cluster.NewClusterOrDie() - state, err := c.LoadZarfState() + c := cluster.NewClusterOrDie(ctx) + state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } // If we are setup to use an internal artifact server, create the artifact registry token if state.ArtifactServer.InternalServer { - token, err := git.New(state.GitServer).CreatePackageRegistryToken() + token, err := git.New(state.GitServer).CreatePackageRegistryToken(ctx) if err != nil { message.WarnErr(err, lang.CmdInternalArtifactRegistryGiteaTokenErr) } state.ArtifactServer.PushToken = token.Sha1 - c.SaveZarfState(state) + c.SaveZarfState(ctx, state) } }, } @@ -205,9 +213,11 @@ var updateGiteaPVC = &cobra.Command{ Short: lang.CmdInternalUpdateGiteaPVCShort, Long: lang.CmdInternalUpdateGiteaPVCLong, Run: func(_ *cobra.Command, _ []string) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() // There is a possibility that the pvc does not yet exist and Gitea helm chart should create it - helmShouldCreate, err := git.UpdateGiteaPVC(rollback) + helmShouldCreate, err := git.UpdateGiteaPVC(ctx, rollback) if err != nil { message.WarnErr(err, lang.CmdInternalUpdateGiteaPVCErr) } diff --git a/src/cmd/package.go b/src/cmd/package.go index e3fb00b48a..6378556621 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -5,10 +5,12 @@ package cmd import ( + "context" "fmt" "path/filepath" "regexp" "strings" + "time" "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/config/lang" @@ -84,8 +86,11 @@ var packageDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + // Deploy the package - if err := pkgClient.Deploy(); err != nil { + if err := pkgClient.Deploy(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) } }, @@ -105,8 +110,10 @@ var packageMirrorCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - // Deploy the package - if err := pkgClient.Mirror(); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if err := pkgClient.Mirror(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) } }, @@ -140,8 +147,11 @@ var packageListCmd = &cobra.Command{ Aliases: []string{"l", "ls"}, Short: lang.CmdPackageListShort, Run: func(_ *cobra.Command, _ []string) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Get all the deployed packages - deployedZarfPackages, errs := 
cluster.NewClusterOrDie().GetDeployedZarfPackages() + deployedZarfPackages, errs := cluster.NewClusterOrDie(ctx).GetDeployedZarfPackages(ctx) if len(errs) > 0 && len(deployedZarfPackages) == 0 { message.Fatalf(errs, lang.CmdPackageListNoPackageWarn) } @@ -185,7 +195,10 @@ var packageRemoveCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - if err := pkgClient.Remove(); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + if err := pkgClient.Remove(ctx); err != nil { message.Fatalf(err, lang.CmdPackageRemoveErr, err.Error()) } }, @@ -296,8 +309,11 @@ func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, return pkgCandidates, cobra.ShellCompDirectiveDefault } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Get all the deployed packages - deployedZarfPackages, _ := c.GetDeployedZarfPackages() + deployedZarfPackages, _ := c.GetDeployedZarfPackages(ctx) // Populate list of package names for _, pkg := range deployedZarfPackages { pkgCandidates = append(pkgCandidates, pkg.Name) diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go index b6fd47e5a0..7485754b7f 100644 --- a/src/cmd/tools/crane.go +++ b/src/cmd/tools/crane.go @@ -5,9 +5,11 @@ package tools import ( + "context" "fmt" "os" "strings" + "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/zarf/src/cmd/common" @@ -123,13 +125,16 @@ func zarfCraneCatalog(cranePlatformOptions *[]crane.Option) *cobra.Command { return err } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Load Zarf state - zarfState, err := c.LoadZarfState() + zarfState, err := c.LoadZarfState(ctx) if err != nil { return err } - registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo) + registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo) if err != nil { return err } @@ -172,8 +177,11 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command message.Note(lang.CmdToolsRegistryZarfState) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Load the state (if able) - zarfState, err := c.LoadZarfState() + zarfState, err := c.LoadZarfState(ctx) if err != nil { message.Warnf(lang.CmdToolsCraneConnectedButBadStateErr, err.Error()) return originalListFn(cmd, args) @@ -184,7 +192,7 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command return originalListFn(cmd, args) } - _, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo) + _, tunnel, err := c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo) if err != nil { return err } @@ -217,20 +225,23 @@ func pruneImages(_ *cobra.Command, _ []string) error { return err } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + // Load the state - zarfState, err := c.LoadZarfState() + zarfState, err := c.LoadZarfState(ctx) if err != nil { return err } // Load the currently deployed packages - zarfPackages, errs := c.GetDeployedZarfPackages() + zarfPackages, errs := c.GetDeployedZarfPackages(ctx) if len(errs) > 0 { return lang.ErrUnableToGetPackages } // Set up a tunnel to the registry if applicable - registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo) + registryEndpoint, tunnel, err := 
c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo) if err != nil { return err } diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go index 7cec63379d..2efd582aea 100644 --- a/src/cmd/tools/zarf.go +++ b/src/cmd/tools/zarf.go @@ -5,8 +5,10 @@ package tools import ( + "context" "fmt" "os" + "time" "slices" @@ -51,7 +53,10 @@ var getCredsCmd = &cobra.Command{ Aliases: []string{"gc"}, Args: cobra.MaximumNArgs(1), Run: func(_ *cobra.Command, args []string) { - state, err := cluster.NewClusterOrDie().LoadZarfState() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) if err != nil || state.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -84,8 +89,11 @@ var updateCredsCmd = &cobra.Command{ } } - c := cluster.NewClusterOrDie() - oldState, err := c.LoadZarfState() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + c := cluster.NewClusterOrDie(ctx) + oldState, err := c.LoadZarfState(ctx) if err != nil || oldState.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -113,16 +121,16 @@ var updateCredsCmd = &cobra.Command{ if confirm { // Update registry and git pull secrets if slices.Contains(args, message.RegistryKey) { - c.UpdateZarfManagedImageSecrets(newState) + c.UpdateZarfManagedImageSecrets(ctx, newState) } if slices.Contains(args, message.GitKey) { - c.UpdateZarfManagedGitSecrets(newState) + c.UpdateZarfManagedGitSecrets(ctx, newState) } // Update artifact token (if internal) if slices.Contains(args, message.ArtifactKey) && newState.ArtifactServer.PushToken == "" && newState.ArtifactServer.InternalServer { g := git.New(oldState.GitServer) - tokenResponse, err := g.CreatePackageRegistryToken() + tokenResponse, err := g.CreatePackageRegistryToken(ctx) if err != nil { // Warn if we couldn't actually update the git server (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableCreateToken, err.Error()) @@ -132,7 +140,7 @@ var updateCredsCmd = &cobra.Command{ } // Save the final Zarf State - err = c.SaveZarfState(newState) + err = c.SaveZarfState(ctx, newState) if err != nil { message.Fatalf(err, lang.ErrSaveState) } @@ -149,14 +157,14 @@ var updateCredsCmd = &cobra.Command{ } if slices.Contains(args, message.GitKey) && newState.GitServer.InternalServer { g := git.New(newState.GitServer) - err = g.UpdateZarfGiteaUsers(oldState) + err = g.UpdateZarfGiteaUsers(ctx, oldState) if err != nil { // Warn if we couldn't actually update the git server (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableUpdateGit, err.Error()) } } if slices.Contains(args, message.AgentKey) { - err = h.UpdateZarfAgentValues() + err = h.UpdateZarfAgentValues(ctx) if err != nil { // Warn if we couldn't actually update the agent (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableUpdateAgent, err.Error()) diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go index d36e94f590..be09a3000b 100644 --- a/src/extensions/bigbang/test/bigbang_test.go +++ b/src/extensions/bigbang/test/bigbang_test.go @@ -117,10 +117,10 @@ func TestReleases(t *testing.T) { require.NoError(t, err, stdOut, stdErr) // Test connectivity to Twistlock - testConnection(t) + 
testConnection(t, context.TODO()) } -func testConnection(t *testing.T) { +func testConnection(t *testing.T, ctx context.Context) { // Establish the tunnel config c, err := cluster.NewCluster() require.NoError(t, err) @@ -128,7 +128,7 @@ func testConnection(t *testing.T) { require.NoError(t, err) // Establish the tunnel connection - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) require.NoError(t, err) defer tunnel.Close() diff --git a/src/internal/packager/git/gitea.go b/src/internal/packager/git/gitea.go index f7bd86b00c..d243337d3c 100644 --- a/src/internal/packager/git/gitea.go +++ b/src/internal/packager/git/gitea.go @@ -6,6 +6,7 @@ package git import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -31,7 +32,7 @@ type CreateTokenResponse struct { } // CreateReadOnlyUser uses the Gitea API to create a non-admin Zarf user. -func (g *Git) CreateReadOnlyUser() error { +func (g *Git) CreateReadOnlyUser(ctx context.Context) error { message.Debugf("git.CreateReadOnlyUser()") c, err := cluster.NewCluster() @@ -44,7 +45,7 @@ func (g *Git) CreateReadOnlyUser() error { if err != nil { return err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return err } @@ -102,16 +103,16 @@ func (g *Git) CreateReadOnlyUser() error { } // UpdateZarfGiteaUsers updates Zarf gitea users -func (g *Git) UpdateZarfGiteaUsers(oldState *types.ZarfState) error { +func (g *Git) UpdateZarfGiteaUsers(ctx context.Context, oldState *types.ZarfState) error { //Update git read only user password - err := g.UpdateGitUser(oldState.GitServer.PushPassword, g.Server.PullUsername, g.Server.PullPassword) + err := g.UpdateGitUser(ctx, oldState.GitServer.PushPassword, g.Server.PullUsername, g.Server.PullPassword) if err != nil { return fmt.Errorf("unable to update gitea read only user password: %w", err) } // Update Git admin password - err = g.UpdateGitUser(oldState.GitServer.PushPassword, g.Server.PushUsername, g.Server.PushPassword) + err = g.UpdateGitUser(ctx, oldState.GitServer.PushPassword, g.Server.PushUsername, g.Server.PushPassword) if err != nil { return fmt.Errorf("unable to update gitea admin user password: %w", err) } @@ -119,7 +120,7 @@ func (g *Git) UpdateZarfGiteaUsers(oldState *types.ZarfState) error { } // UpdateGitUser updates Zarf git server users -func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass string) error { +func (g *Git) UpdateGitUser(ctx context.Context, oldAdminPass string, username string, userpass string) error { message.Debugf("git.UpdateGitUser()") c, err := cluster.NewCluster() @@ -131,7 +132,7 @@ func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass strin if err != nil { return err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return err } @@ -157,7 +158,7 @@ func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass strin } // CreatePackageRegistryToken uses the Gitea API to create a package registry token. 
-func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) { +func (g *Git) CreatePackageRegistryToken(ctx context.Context) (CreateTokenResponse, error) { message.Debugf("git.CreatePackageRegistryToken()") c, err := cluster.NewCluster() @@ -170,7 +171,7 @@ func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) { if err != nil { return CreateTokenResponse{}, err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return CreateTokenResponse{}, err } @@ -245,7 +246,7 @@ func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) { } // UpdateGiteaPVC updates the existing Gitea persistent volume claim and tells Gitea whether to create or not. -func UpdateGiteaPVC(shouldRollBack bool) (string, error) { +func UpdateGiteaPVC(ctx context.Context, shouldRollBack bool) (string, error) { c, err := cluster.NewCluster() if err != nil { return "false", err @@ -260,12 +261,12 @@ func UpdateGiteaPVC(shouldRollBack bool) (string, error) { annotations := map[string]string{"meta.helm.sh/release-name": "zarf-gitea", "meta.helm.sh/release-namespace": "zarf"} if shouldRollBack { - err = c.K8s.RemoveLabelsAndAnnotations(cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations) + err = c.K8s.RemoveLabelsAndAnnotations(ctx, cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations) return "false", err } if pvcName == "data-zarf-gitea-0" { - err = c.K8s.AddLabelsAndAnnotations(cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations) + err = c.K8s.AddLabelsAndAnnotations(ctx, cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations) return "true", err } diff --git a/src/internal/packager/helm/post-render.go b/src/internal/packager/helm/post-render.go index ed3dec5d9f..169dc8f86d 100644 --- a/src/internal/packager/helm/post-render.go +++ b/src/internal/packager/helm/post-render.go @@ -6,10 +6,12 @@ package helm import ( "bytes" + "context" "fmt" "os" "path/filepath" "reflect" + "time" "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/config" @@ -93,13 +95,15 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { finalManifestsOutput := bytes.NewBuffer(nil) - // Otherwise, loop over the resources, if r.cluster != nil { - if err := r.editHelmResources(resources, finalManifestsOutput); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if err := r.editHelmResources(ctx, resources, finalManifestsOutput); err != nil { return nil, err } - if err := r.adoptAndUpdateNamespaces(); err != nil { + if err := r.adoptAndUpdateNamespaces(ctx); err != nil { return nil, err } } else { @@ -112,9 +116,9 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { return finalManifestsOutput, nil } -func (r *renderer) adoptAndUpdateNamespaces() error { +func (r *renderer) adoptAndUpdateNamespaces(ctx context.Context) error { c := r.cluster - existingNamespaces, _ := c.GetNamespaces() + existingNamespaces, _ := c.GetNamespaces(ctx) for name, namespace := range r.namespaces { // Check to see if this namespace already exists @@ -127,7 +131,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error { if !existingNamespace { // This is a new namespace, add it - if _, err := c.CreateNamespace(namespace); err != nil { + if _, err := c.CreateNamespace(ctx, namespace); err != nil { return fmt.Errorf("unable to create the missing namespace %s", name) } } else if r.cfg.DeployOpts.AdoptExistingResources { @@ 
-136,7 +140,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error { message.Warnf("Refusing to adopt the initial namespace: %s", name) } else { // This is an existing namespace to adopt - if _, err := c.UpdateNamespace(namespace); err != nil { + if _, err := c.UpdateNamespace(ctx, namespace); err != nil { return fmt.Errorf("unable to adopt the existing namespace %s", name) } } @@ -151,10 +155,10 @@ func (r *renderer) adoptAndUpdateNamespaces() error { validRegistrySecret := c.GenerateRegistryPullCreds(name, config.ZarfImagePullSecretName, r.cfg.State.RegistryInfo) // Try to get a valid existing secret - currentRegistrySecret, _ := c.GetSecret(name, config.ZarfImagePullSecretName) + currentRegistrySecret, _ := c.GetSecret(ctx, name, config.ZarfImagePullSecretName) if currentRegistrySecret.Name != config.ZarfImagePullSecretName || !reflect.DeepEqual(currentRegistrySecret.Data, validRegistrySecret.Data) { // Create or update the zarf registry secret - if _, err := c.CreateOrUpdateSecret(validRegistrySecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, validRegistrySecret); err != nil { message.WarnErrf(err, "Problem creating registry secret for the %s namespace", name) } @@ -162,7 +166,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error { gitServerSecret := c.GenerateGitPullCreds(name, config.ZarfGitServerSecretName, r.cfg.State.GitServer) // Create or update the zarf git server secret - if _, err := c.CreateOrUpdateSecret(gitServerSecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, gitServerSecret); err != nil { message.WarnErrf(err, "Problem creating git server secret for the %s namespace", name) } } @@ -170,7 +174,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error { return nil } -func (r *renderer) editHelmResources(resources []releaseutil.Manifest, finalManifestsOutput *bytes.Buffer) error { +func (r *renderer) editHelmResources(ctx context.Context, resources []releaseutil.Manifest, finalManifestsOutput *bytes.Buffer) error { for _, resource := range resources { // parse to unstructured to have access to more data than just the name rawData := &unstructured.Unstructured{} @@ -236,7 +240,7 @@ func (r *renderer) editHelmResources(resources []releaseutil.Manifest, finalMani "meta.helm.sh/release-namespace": r.chart.Namespace, } - if err := r.cluster.AddLabelsAndAnnotations(deployedNamespace, rawData.GetName(), rawData.GroupVersionKind().GroupKind(), helmLabels, helmAnnotations); err != nil { + if err := r.cluster.AddLabelsAndAnnotations(ctx, deployedNamespace, rawData.GetName(), rawData.GroupVersionKind().GroupKind(), helmLabels, helmAnnotations); err != nil { // Print a debug message since this could just be because the resource doesn't exist message.Debugf("Unable to adopt resource %s: %s", rawData.GetName(), err.Error()) } diff --git a/src/internal/packager/helm/zarf.go b/src/internal/packager/helm/zarf.go index b8d42c3f84..71124447ed 100644 --- a/src/internal/packager/helm/zarf.go +++ b/src/internal/packager/helm/zarf.go @@ -5,6 +5,7 @@ package helm import ( + "context" "fmt" "github.com/defenseunicorns/zarf/src/pkg/cluster" @@ -48,7 +49,7 @@ func (h *Helm) UpdateZarfRegistryValues() error { } // UpdateZarfAgentValues updates the Zarf agent deployment with the new state values -func (h *Helm) UpdateZarfAgentValues() error { +func (h *Helm) UpdateZarfAgentValues(ctx context.Context) error { spinner := message.NewProgressSpinner("Gathering information to update Zarf Agent TLS") defer spinner.Stop() @@ -58,10 +59,14 @@ func (h *Helm) 
UpdateZarfAgentValues() error { } // Get the current agent image from one of its pods. - pods := h.cluster.WaitForPodsAndContainers(k8s.PodLookup{ - Namespace: cluster.ZarfNamespaceName, - Selector: "app=agent-hook", - }, nil) + pods := h.cluster.WaitForPodsAndContainers( + ctx, + k8s.PodLookup{ + Namespace: cluster.ZarfNamespaceName, + Selector: "app=agent-hook", + }, + nil, + ) var currentAgentImage transform.Image if len(pods) > 0 && len(pods[0].Spec.Containers) > 0 { @@ -115,10 +120,13 @@ func (h *Helm) UpdateZarfAgentValues() error { defer spinner.Stop() // Force pods to be recreated to get the updated secret. - err = h.cluster.DeletePods(k8s.PodLookup{ - Namespace: cluster.ZarfNamespaceName, - Selector: "app=agent-hook", - }) + err = h.cluster.DeletePods( + ctx, + k8s.PodLookup{ + Namespace: cluster.ZarfNamespaceName, + Selector: "app=agent-hook", + }, + ) if err != nil { return fmt.Errorf("error recycling pods for the Zarf Agent: %w", err) } diff --git a/src/internal/packager/images/push.go b/src/internal/packager/images/push.go index 5daff0a14f..cbfbaad22f 100644 --- a/src/internal/packager/images/push.go +++ b/src/internal/packager/images/push.go @@ -5,6 +5,7 @@ package images import ( + "context" "fmt" "net/http" "time" @@ -23,7 +24,7 @@ import ( // PushToZarfRegistry pushes a provided image into the configured Zarf registry // This function will optionally shorten the image name while appending a checksum of the original image name. -func (i *ImageConfig) PushToZarfRegistry() error { +func (i *ImageConfig) PushToZarfRegistry(ctx context.Context) error { message.Debug("images.PushToZarfRegistry()") logs.Warn.SetOutput(&message.DebugWriter{}) @@ -72,7 +73,7 @@ func (i *ImageConfig) PushToZarfRegistry() error { c, _ := cluster.NewCluster() if c != nil { - registryURL, tunnel, err = c.ConnectToZarfRegistryEndpoint(i.RegInfo) + registryURL, tunnel, err = c.ConnectToZarfRegistryEndpoint(ctx, i.RegInfo) if err != nil { return err } diff --git a/src/pkg/cluster/common.go b/src/pkg/cluster/common.go index a84a06fee3..895124bf6b 100644 --- a/src/pkg/cluster/common.go +++ b/src/pkg/cluster/common.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "time" "github.com/defenseunicorns/zarf/src/config" @@ -27,9 +28,9 @@ var labels = k8s.Labels{ config.ZarfManagedByLabel: "zarf", } -// NewClusterOrDie creates a new Cluster instance and waits up to 30 seconds for the cluster to be ready or throws a fatal error. -func NewClusterOrDie() *Cluster { - c, err := NewClusterWithWait(DefaultTimeout) +// NewClusterOrDie creates a new Cluster instance and waits for the cluster to be ready or throws a fatal error. +func NewClusterOrDie(ctx context.Context) *Cluster { + c, err := NewClusterWithWait(ctx) if err != nil { message.Fatalf(err, "Failed to connect to cluster") } @@ -38,8 +39,8 @@ func NewClusterOrDie() *Cluster { } // NewClusterWithWait creates a new Cluster instance and waits for the given timeout for the cluster to be ready. 
-func NewClusterWithWait(timeout time.Duration) (*Cluster, error) { - spinner := message.NewProgressSpinner("Waiting for cluster connection (%s timeout)", timeout.String()) +func NewClusterWithWait(ctx context.Context) (*Cluster, error) { + spinner := message.NewProgressSpinner("Waiting for cluster connection") defer spinner.Stop() c := &Cluster{} @@ -50,7 +51,7 @@ func NewClusterWithWait(timeout time.Duration) (*Cluster, error) { return nil, err } - err = c.WaitForHealthyCluster(timeout) + err = c.WaitForHealthyCluster(ctx) if err != nil { return nil, err } diff --git a/src/pkg/cluster/data.go b/src/pkg/cluster/data.go index d2724c0d1f..0c5e526536 100644 --- a/src/pkg/cluster/data.go +++ b/src/pkg/cluster/data.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "fmt" "os" "path/filepath" @@ -25,7 +26,7 @@ import ( // HandleDataInjection waits for the target pod(s) to come up and inject the data into them // todo: this currently requires kubectl but we should have enough k8s work to make this native now. -func (c *Cluster) HandleDataInjection(wg *sync.WaitGroup, data types.ZarfDataInjection, componentPath *layout.ComponentPaths, dataIdx int) { +func (c *Cluster) HandleDataInjection(ctx context.Context, wg *sync.WaitGroup, data types.ZarfDataInjection, componentPath *layout.ComponentPaths, dataIdx int) { defer wg.Done() injectionCompletionMarker := filepath.Join(componentPath.DataInjections, config.GetDataInjectionMarker()) @@ -74,7 +75,7 @@ iterator: } // Wait until the pod we are injecting data into becomes available - pods := c.WaitForPodsAndContainers(target, podFilterByInitContainer) + pods := c.WaitForPodsAndContainers(ctx, target, podFilterByInitContainer) if len(pods) < 1 { continue } @@ -139,7 +140,7 @@ iterator: // Block one final time to make sure at least one pod has come up and injected the data // Using only the pod as the final selector because we don't know what the container name will be // Still using the init container filter to make sure we have the right running pod - _ = c.WaitForPodsAndContainers(podOnlyTarget, podFilterByInitContainer) + _ = c.WaitForPodsAndContainers(ctx, podOnlyTarget, podFilterByInitContainer) // Cleanup now to reduce disk pressure _ = os.RemoveAll(source) diff --git a/src/pkg/cluster/injector.go b/src/pkg/cluster/injector.go index 7403c567aa..4a7c872ee5 100644 --- a/src/pkg/cluster/injector.go +++ b/src/pkg/cluster/injector.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "fmt" "net/http" "os" @@ -40,7 +41,7 @@ var ( type imageNodeMap map[string][]string // StartInjectionMadness initializes a Zarf injection into the cluster. 
-func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injectorSeedSrcs []string) { +func (c *Cluster) StartInjectionMadness(ctx context.Context, tmpDir string, imagesDir string, injectorSeedSrcs []string) { spinner := message.NewProgressSpinner("Attempting to bootstrap the seed image into the cluster") defer spinner.Stop() @@ -63,19 +64,20 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto var seedImages []transform.Image // Get all the images from the cluster - timeout := 5 * time.Minute - spinner.Updatef("Getting the list of existing cluster images (%s timeout)", timeout.String()) - if images, err = c.getImagesAndNodesForInjection(timeout); err != nil { + spinner.Updatef("Getting the list of existing cluster images") + findImagesCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + if images, err = c.getImagesAndNodesForInjection(findImagesCtx); err != nil { spinner.Fatalf(err, "Unable to generate a list of candidate images to perform the registry injection") } spinner.Updatef("Creating the injector configmap") - if err = c.createInjectorConfigmap(tmp.InjectionBinary); err != nil { + if err = c.createInjectorConfigmap(ctx, tmp.InjectionBinary); err != nil { spinner.Fatalf(err, "Unable to create the injector configmap") } spinner.Updatef("Creating the injector service") - if service, err := c.createService(); err != nil { + if service, err := c.createService(ctx); err != nil { spinner.Fatalf(err, "Unable to create the injector service") } else { config.ZarfSeedPort = fmt.Sprintf("%d", service.Spec.Ports[0].NodePort) @@ -87,7 +89,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto } spinner.Updatef("Loading the seed registry configmaps") - if payloadConfigmaps, sha256sum, err = c.createPayloadConfigmaps(tmp.SeedImagesDir, tmp.InjectorPayloadTarGz, spinner); err != nil { + if payloadConfigmaps, sha256sum, err = c.createPayloadConfigmaps(ctx, tmp.SeedImagesDir, tmp.InjectorPayloadTarGz, spinner); err != nil { spinner.Fatalf(err, "Unable to generate the injector payload configmaps") } @@ -104,7 +106,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto spinner.Updatef("Attempting to bootstrap with the %s/%s", node, image) // Make sure the pod is not there first - _ = c.DeletePod(ZarfNamespaceName, "injector") + _ = c.DeletePod(ctx, ZarfNamespaceName, "injector") // Update the podspec image path and use the first node found pod, err := c.buildInjectionPod(node[0], image, payloadConfigmaps, sha256sum) @@ -115,7 +117,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto } // Create the pod in the cluster - pod, err = c.CreatePod(pod) + pod, err = c.CreatePod(ctx, pod) if err != nil { // Just debug log the output because failures just result in trying the next image message.Debug(pod, err) @@ -123,7 +125,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto } // if no error, try and wait for a seed image to be present, return if successful - if c.injectorIsReady(seedImages, spinner) { + if c.injectorIsReady(ctx, seedImages, spinner) { spinner.Success() return } @@ -136,20 +138,20 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto } // StopInjectionMadness handles cleanup once the seed registry is up. 
-func (c *Cluster) StopInjectionMadness() error { +func (c *Cluster) StopInjectionMadness(ctx context.Context) error { // Try to kill the injector pod now - if err := c.DeletePod(ZarfNamespaceName, "injector"); err != nil { + if err := c.DeletePod(ctx, ZarfNamespaceName, "injector"); err != nil { return err } // Remove the configmaps labelMatch := map[string]string{"zarf-injector": "payload"} - if err := c.DeleteConfigMapsByLabel(ZarfNamespaceName, labelMatch); err != nil { + if err := c.DeleteConfigMapsByLabel(ctx, ZarfNamespaceName, labelMatch); err != nil { return err } // Remove the injector service - return c.DeleteService(ZarfNamespaceName, "zarf-injector") + return c.DeleteService(ctx, ZarfNamespaceName, "zarf-injector") } func (c *Cluster) loadSeedImages(imagesDir, seedImagesDir string, injectorSeedSrcs []string, spinner *message.Spinner) ([]transform.Image, error) { @@ -188,7 +190,7 @@ func (c *Cluster) loadSeedImages(imagesDir, seedImagesDir string, injectorSeedSr return seedImages, nil } -func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner *message.Spinner) ([]string, string, error) { +func (c *Cluster) createPayloadConfigmaps(ctx context.Context, seedImagesDir, tarPath string, spinner *message.Spinner) ([]string, string, error) { var configMaps []string // Chunk size has to accommodate base64 encoding & etcd 1MB limit @@ -225,7 +227,7 @@ func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner spinner.Updatef("Adding archive binary configmap %d of %d to the cluster", idx+1, chunkCount) // Attempt to create the configmap in the cluster - if _, err = c.ReplaceConfigmap(ZarfNamespaceName, fileName, configData); err != nil { + if _, err = c.ReplaceConfigmap(ctx, ZarfNamespaceName, fileName, configData); err != nil { return configMaps, "", err } @@ -240,13 +242,13 @@ func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner } // Test for pod readiness and seed image presence. 
-func (c *Cluster) injectorIsReady(seedImages []transform.Image, spinner *message.Spinner) bool { +func (c *Cluster) injectorIsReady(ctx context.Context, seedImages []transform.Image, spinner *message.Spinner) bool { tunnel, err := c.NewTunnel(ZarfNamespaceName, k8s.SvcResource, ZarfInjectorName, "", 0, ZarfInjectorPort) if err != nil { return false } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return false } @@ -275,7 +277,7 @@ func (c *Cluster) injectorIsReady(seedImages []transform.Image, spinner *message return true } -func (c *Cluster) createInjectorConfigmap(binaryPath string) error { +func (c *Cluster) createInjectorConfigmap(ctx context.Context, binaryPath string) error { var err error configData := make(map[string][]byte) @@ -285,17 +287,17 @@ func (c *Cluster) createInjectorConfigmap(binaryPath string) error { } // Try to delete configmap silently - _ = c.DeleteConfigmap(ZarfNamespaceName, "rust-binary") + _ = c.DeleteConfigmap(ctx, ZarfNamespaceName, "rust-binary") // Attempt to create the configmap in the cluster - if _, err = c.CreateConfigmap(ZarfNamespaceName, "rust-binary", configData); err != nil { + if _, err = c.CreateConfigmap(ctx, ZarfNamespaceName, "rust-binary", configData); err != nil { return err } return nil } -func (c *Cluster) createService() (*corev1.Service, error) { +func (c *Cluster) createService(ctx context.Context) (*corev1.Service, error) { service := c.GenerateService(ZarfNamespaceName, "zarf-injector") service.Spec.Type = corev1.ServiceTypeNodePort @@ -307,9 +309,9 @@ func (c *Cluster) createService() (*corev1.Service, error) { } // Attempt to purse the service silently - _ = c.DeleteService(ZarfNamespaceName, "zarf-injector") + _ = c.DeleteService(ctx, ZarfNamespaceName, "zarf-injector") - return c.CreateService(service) + return c.CreateService(ctx, service) } // buildInjectionPod return a pod for injection with the appropriate containers to perform the injection. 
@@ -431,26 +433,20 @@ func (c *Cluster) buildInjectionPod(node, image string, payloadConfigmaps []stri return pod, nil } -// GetImagesFromAvailableNodes checks for images on schedulable nodes within a cluster and returns -func (c *Cluster) getImagesAndNodesForInjection(timeoutDuration time.Duration) (imageNodeMap, error) { - timeout := time.After(timeoutDuration) +// getImagesFromAvailableNodes checks for images on schedulable nodes within a cluster and returns +func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeMap, error) { result := make(imageNodeMap) for { select { - - // On timeout abort - case <-timeout: - return nil, fmt.Errorf("get image list timed-out") - - // After delay, try running + case <-ctx.Done(): + return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) default: - pods, err := c.GetPods(corev1.NamespaceAll) + pods, err := c.GetPods(ctx, corev1.NamespaceAll) if err != nil { - return nil, fmt.Errorf("unable to get the list of pods in the cluster") + return nil, fmt.Errorf("unable to get the list of pods in the cluster: %w", err) } - findImages: for _, pod := range pods.Items { nodeName := pod.Spec.NodeName @@ -459,42 +455,44 @@ func (c *Cluster) getImagesAndNodesForInjection(timeoutDuration time.Duration) ( continue } - nodeDetails, err := c.GetNode(nodeName) - + nodeDetails, err := c.GetNode(ctx, nodeName) if err != nil { - return nil, fmt.Errorf("unable to get the node %s", pod.Spec.NodeName) + return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) } if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { - continue findImages + continue } for _, taint := range nodeDetails.Spec.Taints { if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { - continue findImages + continue } } for _, container := range pod.Spec.InitContainers { result[container.Image] = append(result[container.Image], nodeName) } - for _, container := range pod.Spec.Containers { result[container.Image] = append(result[container.Image], nodeName) } - for _, container := range pod.Spec.EphemeralContainers { result[container.Image] = append(result[container.Image], nodeName) } } - } - if len(result) < 1 { - c.Log("no images found: %w") - time.Sleep(2 * time.Second) - } else { - return result, nil + if len(result) > 0 { + return result, nil + } + + c.Log("No images found on any node. Retrying...") + + select { + case <-ctx.Done(): + return nil, fmt.Errorf("get image list cancelled or timed out while waiting to retry: %w", ctx.Err()) + case <-time.After(2 * time.Second): + } } } } diff --git a/src/pkg/cluster/namespace.go b/src/pkg/cluster/namespace.go index 82b4277901..a7209936b3 100644 --- a/src/pkg/cluster/namespace.go +++ b/src/pkg/cluster/namespace.go @@ -11,9 +11,9 @@ import ( ) // DeleteZarfNamespace deletes the Zarf namespace from the connected cluster. 
-func (c *Cluster) DeleteZarfNamespace() { +func (c *Cluster) DeleteZarfNamespace(ctx context.Context) error { spinner := message.NewProgressSpinner("Deleting the zarf namespace from this cluster") defer spinner.Stop() - c.DeleteNamespace(context.TODO(), ZarfNamespaceName) + return c.DeleteNamespace(ctx, ZarfNamespaceName) } diff --git a/src/pkg/cluster/secrets.go b/src/pkg/cluster/secrets.go index 2cdb1a20d1..17183d5e62 100644 --- a/src/pkg/cluster/secrets.go +++ b/src/pkg/cluster/secrets.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "encoding/base64" "encoding/json" "reflect" @@ -73,16 +74,16 @@ func (c *Cluster) GenerateGitPullCreds(namespace, name string, gitServerInfo typ } // UpdateZarfManagedImageSecrets updates all Zarf-managed image secrets in all namespaces based on state -func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { +func (c *Cluster) UpdateZarfManagedImageSecrets(ctx context.Context, state *types.ZarfState) { spinner := message.NewProgressSpinner("Updating existing Zarf-managed image secrets") defer spinner.Stop() - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { // Update all image pull secrets for _, namespace := range namespaces.Items { - currentRegistrySecret, err := c.GetSecret(namespace.Name, config.ZarfImagePullSecretName) + currentRegistrySecret, err := c.GetSecret(ctx, namespace.Name, config.ZarfImagePullSecretName) if err != nil { continue } @@ -96,7 +97,7 @@ func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { newRegistrySecret := c.GenerateRegistryPullCreds(namespace.Name, config.ZarfImagePullSecretName, state.RegistryInfo) if !reflect.DeepEqual(currentRegistrySecret.Data, newRegistrySecret.Data) { // Create or update the zarf registry secret - if _, err := c.CreateOrUpdateSecret(newRegistrySecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, newRegistrySecret); err != nil { message.WarnErrf(err, "Problem creating registry secret for the %s namespace", namespace.Name) } } @@ -107,16 +108,16 @@ func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { } // UpdateZarfManagedGitSecrets updates all Zarf-managed git secrets in all namespaces based on state -func (c *Cluster) UpdateZarfManagedGitSecrets(state *types.ZarfState) { +func (c *Cluster) UpdateZarfManagedGitSecrets(ctx context.Context, state *types.ZarfState) { spinner := message.NewProgressSpinner("Updating existing Zarf-managed git secrets") defer spinner.Stop() - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { // Update all git pull secrets for _, namespace := range namespaces.Items { - currentGitSecret, err := c.GetSecret(namespace.Name, config.ZarfGitServerSecretName) + currentGitSecret, err := c.GetSecret(ctx, namespace.Name, config.ZarfGitServerSecretName) if err != nil { continue } @@ -130,7 +131,7 @@ func (c *Cluster) UpdateZarfManagedGitSecrets(state *types.ZarfState) { newGitSecret := c.GenerateGitPullCreds(namespace.Name, config.ZarfGitServerSecretName, state.GitServer) if !reflect.DeepEqual(currentGitSecret.StringData, newGitSecret.StringData) { // Create or update the zarf git secret - if _, err := c.CreateOrUpdateSecret(newGitSecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, newGitSecret); err != nil { message.WarnErrf(err, "Problem creating git 
server secret for the %s namespace", namespace.Name) } } diff --git a/src/pkg/cluster/state.go b/src/pkg/cluster/state.go index ebac18fbd3..a2585c4361 100644 --- a/src/pkg/cluster/state.go +++ b/src/pkg/cluster/state.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "encoding/json" "fmt" "time" @@ -34,7 +35,7 @@ const ( ) // InitZarfState initializes the Zarf state with the given temporary directory and init configs. -func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { +func (c *Cluster) InitZarfState(ctx context.Context, initOptions types.ZarfInitOptions) error { var ( distro string err error @@ -46,7 +47,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Attempt to load an existing state prior to init. // NOTE: We are ignoring the error here because we don't really expect a state to exist yet. spinner.Updatef("Checking cluster for existing Zarf deployment") - state, _ := c.LoadZarfState() + state, _ := c.LoadZarfState(ctx) // If state is nil, this is a new cluster. if state == nil { @@ -59,7 +60,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { state.ZarfAppliance = true } else { // Otherwise, trying to detect the K8s distro type. - distro, err = c.DetectDistro() + distro, err = c.DetectDistro(ctx) if err != nil { // This is a basic failure right now but likely could be polished to provide user guidance to resolve. return fmt.Errorf("unable to connect to the cluster to verify the distro: %w", err) @@ -79,7 +80,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Setup zarf agent PKI state.AgentTLS = pki.GeneratePKI(config.ZarfAgentHost) - namespaces, err := c.GetNamespaces() + namespaces, err := c.GetNamespaces(ctx) if err != nil { return fmt.Errorf("unable to get the Kubernetes namespaces: %w", err) } @@ -93,7 +94,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // This label will tell the Zarf Agent to ignore this namespace. namespace.Labels[agentLabel] = "ignore" namespaceCopy := namespace - if _, err = c.UpdateNamespace(&namespaceCopy); err != nil { + if _, err = c.UpdateNamespace(ctx, &namespaceCopy); err != nil { // This is not a hard failure, but we should log it. message.WarnErrf(err, "Unable to mark the namespace %s as ignored by Zarf Agent", namespace.Name) } @@ -102,14 +103,16 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Try to create the zarf namespace. spinner.Updatef("Creating the Zarf namespace") zarfNamespace := c.NewZarfManagedNamespace(ZarfNamespaceName) - if _, err := c.CreateNamespace(zarfNamespace); err != nil { + if _, err := c.CreateNamespace(ctx, zarfNamespace); err != nil { return fmt.Errorf("unable to create the zarf namespace: %w", err) } // Wait up to 2 minutes for the default service account to be created. // Some clusters seem to take a while to create this, see https://github.com/kubernetes/kubernetes/issues/66689. // The default SA is required for pods to start properly. 
- if _, err := c.WaitForServiceAccount(ZarfNamespaceName, "default", 2*time.Minute); err != nil { + saCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) + defer cancel() + if _, err := c.WaitForServiceAccount(saCtx, ZarfNamespaceName, "default"); err != nil { return fmt.Errorf("unable get default Zarf service account: %w", err) } @@ -158,7 +161,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { spinner.Success() // Save the state back to K8s - if err := c.SaveZarfState(state); err != nil { + if err := c.SaveZarfState(ctx, state); err != nil { return fmt.Errorf("unable to save the Zarf state: %w", err) } @@ -166,9 +169,9 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { } // LoadZarfState returns the current zarf/zarf-state secret data or an empty ZarfState. -func (c *Cluster) LoadZarfState() (state *types.ZarfState, err error) { +func (c *Cluster) LoadZarfState(ctx context.Context) (state *types.ZarfState, err error) { // Set up the API connection - secret, err := c.GetSecret(ZarfNamespaceName, ZarfStateSecretName) + secret, err := c.GetSecret(ctx, ZarfNamespaceName, ZarfStateSecretName) if err != nil { return nil, fmt.Errorf("%w. %s", err, message.ColorWrap("Did you remember to zarf init?", color.Bold)) } @@ -218,7 +221,7 @@ func (c *Cluster) debugPrintZarfState(state *types.ZarfState) { } // SaveZarfState takes a given state and persists it to the Zarf/zarf-state secret. -func (c *Cluster) SaveZarfState(state *types.ZarfState) error { +func (c *Cluster) SaveZarfState(ctx context.Context, state *types.ZarfState) error { c.debugPrintZarfState(state) // Convert the data back to JSON. @@ -249,7 +252,7 @@ func (c *Cluster) SaveZarfState(state *types.ZarfState) error { } // Attempt to create or update the secret and return. - if _, err := c.CreateOrUpdateSecret(secret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, secret); err != nil { return fmt.Errorf("unable to create the zarf state secret") } diff --git a/src/pkg/cluster/tunnel.go b/src/pkg/cluster/tunnel.go index d52c606ecc..c960fb1e8e 100644 --- a/src/pkg/cluster/tunnel.go +++ b/src/pkg/cluster/tunnel.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "fmt" "strings" @@ -54,8 +55,8 @@ func NewTunnelInfo(namespace, resourceType, resourceName, urlSuffix string, loca } // PrintConnectTable will print a table of all Zarf connect matches found in the cluster. -func (c *Cluster) PrintConnectTable() error { - list, err := c.GetServicesByLabelExists(v1.NamespaceAll, config.ZarfConnectLabelName) +func (c *Cluster) PrintConnectTable(ctx context.Context) error { + list, err := c.GetServicesByLabelExists(ctx, v1.NamespaceAll, config.ZarfConnectLabelName) if err != nil { return err } @@ -78,7 +79,7 @@ func (c *Cluster) PrintConnectTable() error { } // Connect will establish a tunnel to the specified target. 
-func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { +func (c *Cluster) Connect(ctx context.Context, target string) (*k8s.Tunnel, error) { var err error zt := TunnelInfo{ namespace: ZarfNamespaceName, @@ -107,7 +108,7 @@ func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { default: if target != "" { - if zt, err = c.checkForZarfConnectLabel(target); err != nil { + if zt, err = c.checkForZarfConnectLabel(ctx, target); err != nil { return nil, fmt.Errorf("problem looking for a zarf connect label in the cluster: %s", err.Error()) } } @@ -120,17 +121,17 @@ func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { } } - return c.ConnectTunnelInfo(zt) + return c.ConnectTunnelInfo(ctx, zt) } // ConnectTunnelInfo connects to the cluster with the provided TunnelInfo -func (c *Cluster) ConnectTunnelInfo(zt TunnelInfo) (*k8s.Tunnel, error) { +func (c *Cluster) ConnectTunnelInfo(ctx context.Context, zt TunnelInfo) (*k8s.Tunnel, error) { tunnel, err := c.NewTunnel(zt.namespace, zt.resourceType, zt.resourceName, zt.urlSuffix, zt.localPort, zt.remotePort) if err != nil { return nil, err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return nil, err } @@ -139,7 +140,7 @@ func (c *Cluster) ConnectTunnelInfo(zt TunnelInfo) (*k8s.Tunnel, error) { } // ConnectToZarfRegistryEndpoint determines if a registry endpoint is in cluster, and if so opens a tunnel to connect to it -func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) (string, *k8s.Tunnel, error) { +func (c *Cluster) ConnectToZarfRegistryEndpoint(ctx context.Context, registryInfo types.RegistryInfo) (string, *k8s.Tunnel, error) { registryEndpoint := registryInfo.Address var err error @@ -150,7 +151,7 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) return "", tunnel, err } } else { - svcInfo, err := c.ServiceInfoFromNodePortURL(registryInfo.Address) + svcInfo, err := c.ServiceInfoFromNodePortURL(ctx, registryInfo.Address) // If this is a service (no error getting svcInfo), create a port-forward tunnel to that resource if err == nil { @@ -161,7 +162,7 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) } if tunnel != nil { - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return "", tunnel, err } @@ -172,13 +173,13 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) } // checkForZarfConnectLabel looks in the cluster for a connect name that matches the target -func (c *Cluster) checkForZarfConnectLabel(name string) (TunnelInfo, error) { +func (c *Cluster) checkForZarfConnectLabel(ctx context.Context, name string) (TunnelInfo, error) { var err error var zt TunnelInfo message.Debugf("Looking for a Zarf Connect Label in the cluster") - matches, err := c.GetServicesByLabel("", config.ZarfConnectLabelName, name) + matches, err := c.GetServicesByLabel(ctx, "", config.ZarfConnectLabelName, name) if err != nil { return zt, fmt.Errorf("unable to lookup the service: %w", err) } @@ -195,7 +196,7 @@ func (c *Cluster) checkForZarfConnectLabel(name string) (TunnelInfo, error) { zt.remotePort = svc.Spec.Ports[0].TargetPort.IntValue() // if targetPort == 0, look for Port (which is required) if zt.remotePort == 0 { - zt.remotePort = c.FindPodContainerPort(svc) + zt.remotePort = c.FindPodContainerPort(ctx, svc) } // Add the url suffix too. 
diff --git a/src/pkg/cluster/zarf.go b/src/pkg/cluster/zarf.go index afef2362b3..1a90be06ab 100644 --- a/src/pkg/cluster/zarf.go +++ b/src/pkg/cluster/zarf.go @@ -7,7 +7,6 @@ package cluster import ( "context" "encoding/json" - "errors" "fmt" "strings" "time" @@ -23,11 +22,11 @@ import ( // GetDeployedZarfPackages gets metadata information about packages that have been deployed to the cluster. // We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace. // Returns a list of DeployedPackage structs and a list of errors. -func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, []error) { +func (c *Cluster) GetDeployedZarfPackages(ctx context.Context) ([]types.DeployedPackage, []error) { var deployedPackages = []types.DeployedPackage{} var errorList []error // Get the secrets that describe the deployed packages - secrets, err := c.GetSecretsWithLabel(ZarfNamespaceName, ZarfPackageInfoLabel) + secrets, err := c.GetSecretsWithLabel(ctx, ZarfNamespaceName, ZarfPackageInfoLabel) if err != nil { return deployedPackages, append(errorList, err) } @@ -52,9 +51,9 @@ func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, []error) { // GetDeployedPackage gets the metadata information about the package name provided (if it exists in the cluster). // We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace. -func (c *Cluster) GetDeployedPackage(packageName string) (deployedPackage *types.DeployedPackage, err error) { +func (c *Cluster) GetDeployedPackage(ctx context.Context, packageName string) (deployedPackage *types.DeployedPackage, err error) { // Get the secret that describes the deployed package - secret, err := c.GetSecret(ZarfNamespaceName, config.ZarfPackagePrefix+packageName) + secret, err := c.GetSecret(ctx, ZarfNamespaceName, config.ZarfPackagePrefix+packageName) if err != nil { return deployedPackage, err } @@ -63,7 +62,7 @@ func (c *Cluster) GetDeployedPackage(packageName string) (deployedPackage *types } // StripZarfLabelsAndSecretsFromNamespaces removes metadata and secrets from existing namespaces no longer manged by Zarf. -func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() { +func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces(ctx context.Context) { spinner := message.NewProgressSpinner("Removing zarf metadata & secrets from existing namespaces not managed by Zarf") defer spinner.Stop() @@ -72,7 +71,7 @@ func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() { LabelSelector: config.ZarfManagedByLabel + "=zarf", } - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { for _, namespace := range namespaces.Items { @@ -80,7 +79,7 @@ func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() { spinner.Updatef("Removing Zarf Agent label for namespace %s", namespace.Name) delete(namespace.Labels, agentLabel) namespaceCopy := namespace - if _, err = c.UpdateNamespace(&namespaceCopy); err != nil { + if _, err = c.UpdateNamespace(ctx, &namespaceCopy); err != nil { // This is not a hard failure, but we should log it spinner.Errorf(err, "Unable to update the namespace labels for %s", namespace.Name) } @@ -89,7 +88,7 @@ func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() { spinner.Updatef("Removing Zarf secrets for namespace %s", namespace.Name) err := c.Clientset.CoreV1(). Secrets(namespace.Name). 
- DeleteCollection(context.TODO(), deleteOptions, listOptions) + DeleteCollection(ctx, deleteOptions, listOptions) if err != nil { spinner.Errorf(err, "Unable to delete secrets from namespace %s", namespace.Name) } @@ -125,9 +124,8 @@ func (c *Cluster) PackageSecretNeedsWait(deployedPackage *types.DeployedPackage, } // RecordPackageDeploymentAndWait records the deployment of a package to the cluster and waits for any webhooks to complete. -func (c *Cluster) RecordPackageDeploymentAndWait(pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int, component types.ZarfComponent, skipWebhooks bool) (deployedPackage *types.DeployedPackage, err error) { - - deployedPackage, err = c.RecordPackageDeployment(pkg, components, connectStrings, generation) +func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int, component types.ZarfComponent, skipWebhooks bool) (deployedPackage *types.DeployedPackage, err error) { + deployedPackage, err = c.RecordPackageDeployment(ctx, pkg, components, connectStrings, generation) if err != nil { return nil, err } @@ -138,38 +136,37 @@ func (c *Cluster) RecordPackageDeploymentAndWait(pkg types.ZarfPackage, componen return nil, nil } - // Timebox the amount of time we wait for a webhook to complete before erroring waitDuration := types.DefaultWebhookWaitDuration if waitSeconds > 0 { waitDuration = time.Duration(waitSeconds) * time.Second } - timeout := time.After(waitDuration) - // We need to wait for this package to finish having webhooks run, create a spinner and keep checking until it's ready - spinner := message.NewProgressSpinner("Waiting for webhook '%s' to complete for component '%s'", hookName, component.Name) + waitCtx, cancel := context.WithTimeout(ctx, waitDuration) + defer cancel() + + spinner := message.NewProgressSpinner("Waiting for webhook %q to complete for component %q", hookName, component.Name) defer spinner.Stop() - for packageNeedsWait { + + for { select { - // On timeout, abort and return an error. - case <-timeout: - return nil, errors.New("timed out waiting for package deployment to complete") - default: - // Wait for 1 second before checking the secret again - time.Sleep(1 * time.Second) - deployedPackage, err = c.GetDeployedPackage(deployedPackage.Name) + case <-waitCtx.Done(): + return nil, fmt.Errorf("timed out waiting for package deployment to complete: %w", waitCtx.Err()) + case <-time.After(1 * time.Second): + deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) if err != nil { return nil, err } packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) + if !packageNeedsWait { + spinner.Success() + return deployedPackage, nil + } } } - - spinner.Success() - return deployedPackage, nil } // RecordPackageDeployment saves metadata about a package that has been deployed to the cluster. 
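The loop in RecordPackageDeploymentAndWait above is the wait pattern this change standardizes on: the caller's context (narrowed with context.WithTimeout) bounds the overall wait, while time.After paces each poll. A generic sketch of that shape, where waitFor and ready are illustrative names rather than anything in this patch:

package example

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls ready until it reports done, an error occurs, or ctx expires.
func waitFor(ctx context.Context, interval time.Duration, ready func(context.Context) (bool, error)) error {
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting: %w", ctx.Err())
		case <-time.After(interval):
			done, err := ready(ctx)
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}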
-func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int) (deployedPackage *types.DeployedPackage, err error) { +func (c *Cluster) RecordPackageDeployment(ctx context.Context, pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int) (deployedPackage *types.DeployedPackage, err error) { packageName := pkg.Metadata.Name // Generate a secret that describes the package that is being deployed @@ -179,7 +176,7 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty // Attempt to load information about webhooks for the package var componentWebhooks map[string]map[string]types.Webhook - existingPackageSecret, err := c.GetDeployedPackage(packageName) + existingPackageSecret, err := c.GetDeployedPackage(ctx, packageName) if err != nil { message.Debugf("Unable to fetch existing secret for package '%s': %s", packageName, err.Error()) } @@ -205,7 +202,7 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty // Update the package secret deployedPackageSecret.Data = map[string][]byte{"data": packageData} var updatedSecret *corev1.Secret - if updatedSecret, err = c.CreateOrUpdateSecret(deployedPackageSecret); err != nil { + if updatedSecret, err = c.CreateOrUpdateSecret(ctx, deployedPackageSecret); err != nil { return nil, fmt.Errorf("failed to record package deployment in secret '%s'", deployedPackageSecret.Name) } @@ -217,8 +214,8 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty } // EnableRegHPAScaleDown enables the HPA scale down for the Zarf Registry. -func (c *Cluster) EnableRegHPAScaleDown() error { - hpa, err := c.GetHPA(ZarfNamespaceName, "zarf-docker-registry") +func (c *Cluster) EnableRegHPAScaleDown(ctx context.Context) error { + hpa, err := c.GetHPA(ctx, ZarfNamespaceName, "zarf-docker-registry") if err != nil { return err } @@ -228,7 +225,7 @@ func (c *Cluster) EnableRegHPAScaleDown() error { hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy // Save the HPA changes. - if _, err = c.UpdateHPA(hpa); err != nil { + if _, err = c.UpdateHPA(ctx, hpa); err != nil { return err } @@ -236,8 +233,8 @@ func (c *Cluster) EnableRegHPAScaleDown() error { } // DisableRegHPAScaleDown disables the HPA scale down for the Zarf Registry. -func (c *Cluster) DisableRegHPAScaleDown() error { - hpa, err := c.GetHPA(ZarfNamespaceName, "zarf-docker-registry") +func (c *Cluster) DisableRegHPAScaleDown(ctx context.Context) error { + hpa, err := c.GetHPA(ctx, ZarfNamespaceName, "zarf-docker-registry") if err != nil { return err } @@ -247,7 +244,7 @@ func (c *Cluster) DisableRegHPAScaleDown() error { hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy // Save the HPA changes. - if _, err = c.UpdateHPA(hpa); err != nil { + if _, err = c.UpdateHPA(ctx, hpa); err != nil { return err } @@ -255,8 +252,8 @@ func (c *Cluster) DisableRegHPAScaleDown() error { } // GetInstalledChartsForComponent returns any installed Helm Charts for the provided package component. 
-func (c *Cluster) GetInstalledChartsForComponent(packageName string, component types.ZarfComponent) (installedCharts []types.InstalledChart, err error) { - deployedPackage, err := c.GetDeployedPackage(packageName) +func (c *Cluster) GetInstalledChartsForComponent(ctx context.Context, packageName string, component types.ZarfComponent) (installedCharts []types.InstalledChart, err error) { + deployedPackage, err := c.GetDeployedPackage(ctx, packageName) if err != nil { return installedCharts, err } diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index d11be666d2..5f94350b56 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -5,6 +5,7 @@ package k8s import ( + "context" "fmt" "time" @@ -39,31 +40,17 @@ func New(logger Log, defaultLabels Labels) (*K8s, error) { }, nil } -// NewWithWait is a convenience function that creates a new K8s client and waits for the cluster to be healthy. -func NewWithWait(logger Log, defaultLabels Labels, timeout time.Duration) (*K8s, error) { - k, err := New(logger, defaultLabels) - if err != nil { - return nil, err - } - - return k, k.WaitForHealthyCluster(timeout) -} - // WaitForHealthyCluster checks for an available K8s cluster every second until timeout. -func (k *K8s) WaitForHealthyCluster(timeout time.Duration) error { +func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { var err error var nodes *v1.NodeList var pods *v1.PodList - expired := time.After(timeout) for { select { - // on timeout abort - case <-expired: - return fmt.Errorf("timed out waiting for cluster to report healthy") - - // after delay, try running - default: + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) + case <-time.After(1 * time.Second): if k.RestConfig == nil || k.Clientset == nil { config, clientset, err := connect() if err != nil { @@ -76,21 +63,20 @@ func (k *K8s) WaitForHealthyCluster(timeout time.Duration) error { } // Make sure there is at least one running Node - nodes, err = k.GetNodes() + nodes, err = k.GetNodes(ctx) if err != nil || len(nodes.Items) < 1 { k.Log("No nodes reporting healthy yet: %#v\n", err) continue } // Get the cluster pod list - if pods, err = k.GetAllPods(); err != nil { + if pods, err = k.GetAllPods(ctx); err != nil { k.Log("Could not get the pod list: %w", err) continue } // Check that at least one pod is in the 'succeeded' or 'running' state for _, pod := range pods.Items { - // If a valid pod is found, return no error if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { return nil } @@ -98,9 +84,6 @@ func (k *K8s) WaitForHealthyCluster(timeout time.Duration) error { k.Log("No pods reported 'succeeded' or 'running' state yet.") } - - // delay check 1 seconds - time.Sleep(1 * time.Second) } } diff --git a/src/pkg/k8s/configmap.go b/src/pkg/k8s/configmap.go index 626b8b30b5..41ff9cba7b 100644 --- a/src/pkg/k8s/configmap.go +++ b/src/pkg/k8s/configmap.go @@ -15,16 +15,15 @@ import ( ) // ReplaceConfigmap deletes and recreates a configmap. 
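With NewWithWait removed, callers outside this package compose the same behavior themselves: build the client, then bound WaitForHealthyCluster with their own context. A sketch under that assumption (the five-minute budget and the connectWithWait name are illustrative; Log and Labels are the exported types k8s.New already takes):

package example

import (
	"context"
	"time"

	"github.com/defenseunicorns/zarf/src/pkg/k8s"
)

// connectWithWait builds a client and waits for the cluster to report healthy.
func connectWithWait(logger k8s.Log, labels k8s.Labels) (*k8s.K8s, error) {
	k, err := k8s.New(logger, labels)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	if err := k.WaitForHealthyCluster(ctx); err != nil {
		return nil, err
	}
	return k, nil
}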
-func (k *K8s) ReplaceConfigmap(namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { - if err := k.DeleteConfigmap(namespace, name); err != nil { +func (k *K8s) ReplaceConfigmap(ctx context.Context, namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { + if err := k.DeleteConfigmap(ctx, namespace, name); err != nil { return nil, err } - - return k.CreateConfigmap(namespace, name, data) + return k.CreateConfigmap(ctx, namespace, name, data) } // CreateConfigmap applies a configmap to the cluster. -func (k *K8s) CreateConfigmap(namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { +func (k *K8s) CreateConfigmap(ctx context.Context, namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -37,14 +36,14 @@ func (k *K8s) CreateConfigmap(namespace, name string, data map[string][]byte) (* configMap.ObjectMeta.Labels = helpers.MergeMap[string](k.Labels, configMap.ObjectMeta.Labels) createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, createOptions) + return k.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, configMap, createOptions) } // DeleteConfigmap deletes a configmap by name. -func (k *K8s) DeleteConfigmap(namespace, name string) error { +func (k *K8s) DeleteConfigmap(ctx context.Context, namespace, name string) error { namespaceConfigmap := k.Clientset.CoreV1().ConfigMaps(namespace) - err := namespaceConfigmap.Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := namespaceConfigmap.Delete(ctx, name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("error deleting the configmap: %w", err) } @@ -53,7 +52,7 @@ func (k *K8s) DeleteConfigmap(namespace, name string) error { } // DeleteConfigMapsByLabel deletes a configmap by label(s). 
-func (k *K8s) DeleteConfigMapsByLabel(namespace string, labels Labels) error { +func (k *K8s) DeleteConfigMapsByLabel(ctx context.Context, namespace string, labels Labels) error { labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchLabels: labels, }) @@ -62,5 +61,5 @@ func (k *K8s) DeleteConfigMapsByLabel(namespace string, labels Labels) error { LabelSelector: labelSelector.String(), } - return k.Clientset.CoreV1().ConfigMaps(namespace).DeleteCollection(context.TODO(), metaOptions, listOptions) + return k.Clientset.CoreV1().ConfigMaps(namespace).DeleteCollection(ctx, metaOptions, listOptions) } diff --git a/src/pkg/k8s/dynamic.go b/src/pkg/k8s/dynamic.go index daf87c7a1a..59f295f26b 100644 --- a/src/pkg/k8s/dynamic.go +++ b/src/pkg/k8s/dynamic.go @@ -15,17 +15,17 @@ import ( ) // AddLabelsAndAnnotations adds the provided labels and annotations to the specified K8s resource -func (k *K8s) AddLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string) error { - return k.updateLabelsAndAnnotations(resourceNamespace, resourceName, groupKind, labels, annotations, false) +func (k *K8s) AddLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string) error { + return k.updateLabelsAndAnnotations(ctx, resourceNamespace, resourceName, groupKind, labels, annotations, false) } // RemoveLabelsAndAnnotations removes the provided labels and annotations to the specified K8s resource -func (k *K8s) RemoveLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string) error { - return k.updateLabelsAndAnnotations(resourceNamespace, resourceName, groupKind, labels, annotations, true) +func (k *K8s) RemoveLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string) error { + return k.updateLabelsAndAnnotations(ctx, resourceNamespace, resourceName, groupKind, labels, annotations, true) } // updateLabelsAndAnnotations updates the provided labels and annotations to the specified K8s resource -func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string, isRemove bool) error { +func (k *K8s) updateLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string, isRemove bool) error { dynamicClient := dynamic.NewForConfigOrDie(k.RestConfig) discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(k.RestConfig) @@ -41,7 +41,7 @@ func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName return err } - deployedResource, err := dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Get(context.TODO(), resourceName, metav1.GetOptions{}) + deployedResource, err := dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Get(ctx, resourceName, metav1.GetOptions{}) if err != nil { return err } @@ -78,6 +78,6 @@ func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName deployedResource.SetAnnotations(deployedAnnotations) - _, err = dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Update(context.TODO(), deployedResource, metav1.UpdateOptions{}) + _, err = 
dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Update(ctx, deployedResource, metav1.UpdateOptions{}) return err } diff --git a/src/pkg/k8s/hpa.go b/src/pkg/k8s/hpa.go index a159823f67..902f5b1d29 100644 --- a/src/pkg/k8s/hpa.go +++ b/src/pkg/k8s/hpa.go @@ -13,24 +13,24 @@ import ( ) // GetAllHPAs returns a list of horizontal pod autoscalers for all namespaces. -func (k *K8s) GetAllHPAs() (*autoscalingV2.HorizontalPodAutoscalerList, error) { - return k.GetHPAs(corev1.NamespaceAll) +func (k *K8s) GetAllHPAs(ctx context.Context) (*autoscalingV2.HorizontalPodAutoscalerList, error) { + return k.GetHPAs(ctx, corev1.NamespaceAll) } // GetHPAs returns a list of horizontal pod autoscalers in a given namespace. -func (k *K8s) GetHPAs(namespace string) (*autoscalingV2.HorizontalPodAutoscalerList, error) { +func (k *K8s) GetHPAs(ctx context.Context, namespace string) (*autoscalingV2.HorizontalPodAutoscalerList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(ctx, metaOptions) } // GetHPA returns a single horizontal pod autoscaler by namespace and name. -func (k *K8s) GetHPA(namespace, name string) (*autoscalingV2.HorizontalPodAutoscaler, error) { +func (k *K8s) GetHPA(ctx context.Context, namespace, name string) (*autoscalingV2.HorizontalPodAutoscaler, error) { metaOptions := metav1.GetOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(ctx, name, metaOptions) } // UpdateHPA updates the given horizontal pod autoscaler in the cluster. -func (k *K8s) UpdateHPA(hpa *autoscalingV2.HorizontalPodAutoscaler) (*autoscalingV2.HorizontalPodAutoscaler, error) { +func (k *K8s) UpdateHPA(ctx context.Context, hpa *autoscalingV2.HorizontalPodAutoscaler) (*autoscalingV2.HorizontalPodAutoscaler, error) { metaOptions := metav1.UpdateOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(hpa.Namespace).Update(context.TODO(), hpa, metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(hpa.Namespace).Update(ctx, hpa, metaOptions) } diff --git a/src/pkg/k8s/info.go b/src/pkg/k8s/info.go index 61effabdaa..edb655b964 100644 --- a/src/pkg/k8s/info.go +++ b/src/pkg/k8s/info.go @@ -5,6 +5,7 @@ package k8s import ( + "context" "errors" "fmt" "regexp" @@ -29,7 +30,7 @@ const ( ) // DetectDistro returns the matching distro or unknown if not found. 
-func (k *K8s) DetectDistro() (string, error) { +func (k *K8s) DetectDistro(ctx context.Context) (string, error) { kindNodeRegex := regexp.MustCompile(`^kind://`) k3dNodeRegex := regexp.MustCompile(`^k3s://k3d-`) eksNodeRegex := regexp.MustCompile(`^aws:///`) @@ -38,7 +39,7 @@ func (k *K8s) DetectDistro() (string, error) { rke2Regex := regexp.MustCompile(`^rancher/rancher-agent:v2`) tkgRegex := regexp.MustCompile(`^projects\.registry\.vmware\.com/tkg/tanzu_core/`) - nodes, err := k.GetNodes() + nodes, err := k.GetNodes(ctx) if err != nil { return DistroIsUnknown, errors.New("error getting cluster nodes") } @@ -99,7 +100,7 @@ func (k *K8s) DetectDistro() (string, error) { } } - namespaces, err := k.GetNamespaces() + namespaces, err := k.GetNamespaces(ctx) if err != nil { return DistroIsUnknown, errors.New("error getting namespace list") } @@ -115,8 +116,8 @@ func (k *K8s) DetectDistro() (string, error) { } // GetArchitectures returns the cluster system architectures if found. -func (k *K8s) GetArchitectures() ([]string, error) { - nodes, err := k.GetNodes() +func (k *K8s) GetArchitectures(ctx context.Context) ([]string, error) { + nodes, err := k.GetNodes(ctx) if err != nil { return nil, err } diff --git a/src/pkg/k8s/namespace.go b/src/pkg/k8s/namespace.go index 25d9abdf01..729d581c5d 100644 --- a/src/pkg/k8s/namespace.go +++ b/src/pkg/k8s/namespace.go @@ -16,26 +16,26 @@ import ( ) // GetNamespaces returns a list of namespaces in the cluster. -func (k *K8s) GetNamespaces() (*corev1.NamespaceList, error) { +func (k *K8s) GetNamespaces(ctx context.Context) (*corev1.NamespaceList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().Namespaces().List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().Namespaces().List(ctx, metaOptions) } // UpdateNamespace updates the given namespace in the cluster. -func (k *K8s) UpdateNamespace(namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (k *K8s) UpdateNamespace(ctx context.Context, namespace *corev1.Namespace) (*corev1.Namespace, error) { updateOptions := metav1.UpdateOptions{} - return k.Clientset.CoreV1().Namespaces().Update(context.TODO(), namespace, updateOptions) + return k.Clientset.CoreV1().Namespaces().Update(ctx, namespace, updateOptions) } // CreateNamespace creates the given namespace or returns it if it already exists in the cluster. 
-func (k *K8s) CreateNamespace(namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (k *K8s) CreateNamespace(ctx context.Context, namespace *corev1.Namespace) (*corev1.Namespace, error) { metaOptions := metav1.GetOptions{} createOptions := metav1.CreateOptions{} - match, err := k.Clientset.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metaOptions) + match, err := k.Clientset.CoreV1().Namespaces().Get(ctx, namespace.Name, metaOptions) if err != nil || match.Name != namespace.Name { - return k.Clientset.CoreV1().Namespaces().Create(context.TODO(), namespace, createOptions) + return k.Clientset.CoreV1().Namespaces().Create(ctx, namespace, createOptions) } return match, err @@ -46,19 +46,20 @@ func (k *K8s) DeleteNamespace(ctx context.Context, name string) error { // Attempt to delete the namespace immediately gracePeriod := int64(0) err := k.Clientset.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}) - // If an error besides "not found" is returned, return it if err != nil && !errors.IsNotFound(err) { return err } - // Indefinitely wait for the namespace to be deleted, use context.WithTimeout to limit this for { - // Keep checking for the namespace to be deleted - _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(1 * time.Second): + _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } } - time.Sleep(1 * time.Second) } } diff --git a/src/pkg/k8s/nodes.go b/src/pkg/k8s/nodes.go index c2348e06f4..134c00b140 100644 --- a/src/pkg/k8s/nodes.go +++ b/src/pkg/k8s/nodes.go @@ -12,12 +12,12 @@ import ( ) // GetNodes returns a list of nodes from the k8s cluster. -func (k *K8s) GetNodes() (*corev1.NodeList, error) { +func (k *K8s) GetNodes(ctx context.Context) (*corev1.NodeList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().Nodes().List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().Nodes().List(ctx, metaOptions) } // GetNode returns a node from the k8s cluster. -func (k *K8s) GetNode(nodeName string) (*corev1.Node, error) { - return k.Clientset.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func (k *K8s) GetNode(ctx context.Context, nodeName string) (*corev1.Node, error) { + return k.Clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index 6d0a1e2121..4739c2c2b4 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -15,8 +15,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const waitLimit = 30 - // GeneratePod creates a new pod without adding it to the k8s cluster. func (k *K8s) GeneratePod(name, namespace string) *corev1.Pod { pod := &corev1.Pod{ @@ -37,33 +35,37 @@ func (k *K8s) GeneratePod(name, namespace string) *corev1.Pod { } // DeletePod removes a pod from the cluster by namespace & name. 
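DeleteNamespace now polls until the namespace is gone or the context is done, so limiting the wait becomes the caller's contract rather than an inline comment. A sketch of a bounded delete; the two-minute budget is illustrative:

package example

import (
	"context"
	"time"

	"github.com/defenseunicorns/zarf/src/pkg/k8s"
)

// deleteNamespaceBounded deletes a namespace and waits at most two minutes
// for it to disappear from the cluster.
func deleteNamespaceBounded(k *k8s.K8s, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	return k.DeleteNamespace(ctx, name)
}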
-func (k *K8s) DeletePod(namespace string, name string) error { +func (k *K8s) DeletePod(ctx context.Context, namespace string, name string) error { deleteGracePeriod := int64(0) deletePolicy := metav1.DeletePropagationForeground - err := k.Clientset.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{ + + err := k.Clientset.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{ GracePeriodSeconds: &deleteGracePeriod, PropagationPolicy: &deletePolicy, }) - if err != nil { return err } for { - // Keep checking for the pod to be deleted - _, err := k.Clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(1 * time.Second): + _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } } - time.Sleep(1 * time.Second) } } // DeletePods removes a collection of pods from the cluster by pod lookup. -func (k *K8s) DeletePods(target PodLookup) error { +func (k *K8s) DeletePods(ctx context.Context, target PodLookup) error { deleteGracePeriod := int64(0) deletePolicy := metav1.DeletePropagationForeground - return k.Clientset.CoreV1().Pods(target.Namespace).DeleteCollection(context.TODO(), + return k.Clientset.CoreV1().Pods(target.Namespace).DeleteCollection( + ctx, metav1.DeleteOptions{ GracePeriodSeconds: &deleteGracePeriod, PropagationPolicy: &deletePolicy, @@ -75,112 +77,117 @@ func (k *K8s) DeletePods(target PodLookup) error { } // CreatePod inserts the given pod into the cluster. -func (k *K8s) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) { +func (k *K8s) CreatePod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) { createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, createOptions) + return k.Clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, createOptions) } // GetAllPods returns a list of pods from the cluster for all namespaces. -func (k *K8s) GetAllPods() (*corev1.PodList, error) { - return k.GetPods(corev1.NamespaceAll) +func (k *K8s) GetAllPods(ctx context.Context) (*corev1.PodList, error) { + return k.GetPods(ctx, corev1.NamespaceAll) } // GetPods returns a list of pods from the cluster by namespace. -func (k *K8s) GetPods(namespace string) (*corev1.PodList, error) { +func (k *K8s) GetPods(ctx context.Context, namespace string) (*corev1.PodList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().Pods(namespace).List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().Pods(namespace).List(ctx, metaOptions) } // WaitForPodsAndContainers attempts to find pods matching the given selector and optional inclusion filter -// It will wait up to 90 seconds for the pods to be found and will return a list of matching pod names +// It will wait up to 30 seconds for the pods to be found and will return a list of matching pod names // If the timeout is reached, an empty list will be returned. 
-func (k *K8s) WaitForPodsAndContainers(target PodLookup, include PodFilter) []corev1.Pod { - for count := 0; count < waitLimit; count++ { +func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, include PodFilter) []corev1.Pod { + waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() - pods, err := k.Clientset.CoreV1().Pods(target.Namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: target.Selector, - }) - if err != nil { - k.Log("Unable to find matching pods: %w", err) - break - } + for { + select { + case <-waitCtx.Done(): + k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) + return nil + case <-time.After(3 * time.Second): + pods, err := k.Clientset.CoreV1().Pods(target.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: target.Selector, + }) + if err != nil { + k.Log("Unable to find matching pods: %w", err) + break + } - k.Log("Found %d pods for target %#v", len(pods.Items), target) + k.Log("Found %d pods for target %#v", len(pods.Items), target) - var readyPods = []corev1.Pod{} + var readyPods = []corev1.Pod{} - // Sort the pods from newest to oldest - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) - }) + // Sort the pods from newest to oldest + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) - for _, pod := range pods.Items { - k.Log("Testing pod %q", pod.Name) + for _, pod := range pods.Items { + k.Log("Testing pod %q", pod.Name) - // If an include function is provided, only keep pods that return true - if include != nil && !include(pod) { - continue - } + // If an include function is provided, only keep pods that return true + if include != nil && !include(pod) { + continue + } - // Handle container targeting - if target.Container != "" { - k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchesInitContainer bool - - // Check the status of initContainers for a running match - for _, initContainer := range pod.Status.InitContainerStatuses { - isRunning := initContainer.State.Running != nil - if isRunning && initContainer.Name == target.Container { - // On running match in initContainer break this loop - matchesInitContainer = true - readyPods = append(readyPods, pod) - break + // Handle container targeting + if target.Container != "" { + k.Log("Testing pod %q for container %q", pod.Name, target.Container) + var matchesInitContainer bool + + // Check the status of initContainers for a running match + for _, initContainer := range pod.Status.InitContainerStatuses { + isRunning := initContainer.State.Running != nil + if isRunning && initContainer.Name == target.Container { + // On running match in initContainer break this loop + matchesInitContainer = true + readyPods = append(readyPods, pod) + break + } } - } - // Don't check any further if there's already a match - if matchesInitContainer { - continue - } + // Don't check any further if there's already a match + if matchesInitContainer { + continue + } - // Check the status of regular containers for a running match - for _, container := range pod.Status.ContainerStatuses { - isRunning := container.State.Running != nil - if isRunning && container.Name == target.Container { + // Check the status of regular containers for a running match + for _, container := range pod.Status.ContainerStatuses { + isRunning := container.State.Running != nil + if isRunning && 
container.Name == target.Container { + readyPods = append(readyPods, pod) + } + } + } else { + status := pod.Status.Phase + k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) + // Regular status checking without a container + if status == corev1.PodRunning { readyPods = append(readyPods, pod) } } - } else { - status := pod.Status.Phase - k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) - // Regular status checking without a container - if status == corev1.PodRunning { - readyPods = append(readyPods, pod) - } + } + if len(readyPods) > 0 { + return readyPods } } - - if len(readyPods) > 0 { - return readyPods - } - - time.Sleep(3 * time.Second) } - - k.Log("Pod lookup timeout exceeded") - - return []corev1.Pod{} } // FindPodContainerPort will find a pod's container port from a service and return it. // // Returns 0 if no port is found. -func (k *K8s) FindPodContainerPort(svc corev1.Service) int { +func (k *K8s) FindPodContainerPort(ctx context.Context, svc corev1.Service) int { selectorLabelsOfPods := MakeLabels(svc.Spec.Selector) - pods := k.WaitForPodsAndContainers(PodLookup{ - Namespace: svc.Namespace, - Selector: selectorLabelsOfPods, - }, nil) + pods := k.WaitForPodsAndContainers( + ctx, + PodLookup{ + Namespace: svc.Namespace, + Selector: selectorLabelsOfPods, + }, + nil, + ) for _, pod := range pods { // Find the matching name on the port in the pod diff --git a/src/pkg/k8s/sa.go b/src/pkg/k8s/sa.go index 26e48d134d..33eaaf4bc7 100644 --- a/src/pkg/k8s/sa.go +++ b/src/pkg/k8s/sa.go @@ -15,47 +15,42 @@ import ( ) // GetAllServiceAccounts returns a list of services accounts for all namespaces. -func (k *K8s) GetAllServiceAccounts() (*corev1.ServiceAccountList, error) { - return k.GetServiceAccounts(corev1.NamespaceAll) +func (k *K8s) GetAllServiceAccounts(ctx context.Context) (*corev1.ServiceAccountList, error) { + return k.GetServiceAccounts(ctx, corev1.NamespaceAll) } // GetServiceAccounts returns a list of service accounts in a given namespace. -func (k *K8s) GetServiceAccounts(namespace string) (*corev1.ServiceAccountList, error) { +func (k *K8s) GetServiceAccounts(ctx context.Context, namespace string) (*corev1.ServiceAccountList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().ServiceAccounts(namespace).List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(namespace).List(ctx, metaOptions) } // GetServiceAccount returns a single service account by namespace and name. -func (k *K8s) GetServiceAccount(namespace, name string) (*corev1.ServiceAccount, error) { +func (k *K8s) GetServiceAccount(ctx context.Context, namespace, name string) (*corev1.ServiceAccount, error) { metaOptions := metav1.GetOptions{} - return k.Clientset.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metaOptions) } // UpdateServiceAccount updates the given service account in the cluster. 
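The reworked WaitForPodsAndContainers keeps its internal 30-second cap but now layers it on the caller's context, with an optional PodFilter narrowing the result. A caller sketch, assuming PodFilter remains a func(corev1.Pod) bool predicate as it is used above; the namespace, selector, and runningOnly filter are placeholders:

package example

import (
	"context"

	"github.com/defenseunicorns/zarf/src/pkg/k8s"
	corev1 "k8s.io/api/core/v1"
)

// runningPods returns pods matching the selector that report a Running phase.
func runningPods(ctx context.Context, k *k8s.K8s, namespace, selector string) []corev1.Pod {
	runningOnly := func(pod corev1.Pod) bool {
		return pod.Status.Phase == corev1.PodRunning
	}
	return k.WaitForPodsAndContainers(
		ctx,
		k8s.PodLookup{
			Namespace: namespace,
			Selector:  selector,
		},
		runningOnly,
	)
}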
-func (k *K8s) UpdateServiceAccount(svcAccount *corev1.ServiceAccount) (*corev1.ServiceAccount, error) { +func (k *K8s) UpdateServiceAccount(ctx context.Context, svcAccount *corev1.ServiceAccount) (*corev1.ServiceAccount, error) { metaOptions := metav1.UpdateOptions{} - return k.Clientset.CoreV1().ServiceAccounts(svcAccount.Namespace).Update(context.TODO(), svcAccount, metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(svcAccount.Namespace).Update(ctx, svcAccount, metaOptions) } // WaitForServiceAccount waits for a service account to be created in the cluster. -func (k *K8s) WaitForServiceAccount(ns, name string, timeout time.Duration) (*corev1.ServiceAccount, error) { - expired := time.After(timeout) - +func (k *K8s) WaitForServiceAccount(ctx context.Context, ns, name string) (*corev1.ServiceAccount, error) { for { select { - case <-expired: - return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist", ns, name) - - default: - sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) + case <-ctx.Done(): + return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist: %w", ns, name, ctx.Err()) + case <-time.After(1 * time.Second): + sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { - time.Sleep(1 * time.Second) continue } return nil, fmt.Errorf("error getting service account %s/%s: %w", ns, name, err) } - return sa, nil } } diff --git a/src/pkg/k8s/secrets.go b/src/pkg/k8s/secrets.go index 30965caa0c..84d11960d7 100644 --- a/src/pkg/k8s/secrets.go +++ b/src/pkg/k8s/secrets.go @@ -16,14 +16,14 @@ import ( ) // GetSecret returns a Kubernetes secret. -func (k *K8s) GetSecret(namespace, name string) (*corev1.Secret, error) { - return k.Clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func (k *K8s) GetSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) { + return k.Clientset.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) } // GetSecretsWithLabel returns a list of Kubernetes secrets with the given label. -func (k *K8s) GetSecretsWithLabel(namespace, labelSelector string) (*corev1.SecretList, error) { +func (k *K8s) GetSecretsWithLabel(ctx context.Context, namespace, labelSelector string) (*corev1.SecretList, error) { listOptions := metav1.ListOptions{LabelSelector: labelSelector} - return k.Clientset.CoreV1().Secrets(namespace).List(context.TODO(), listOptions) + return k.Clientset.CoreV1().Secrets(namespace).List(ctx, listOptions) } // GenerateSecret returns a Kubernetes secret object without applying it to the cluster. @@ -61,20 +61,20 @@ func (k *K8s) GenerateTLSSecret(namespace, name string, conf GeneratedPKI) (*cor } // CreateOrUpdateTLSSecret creates or updates a Kubernetes secret with a new TLS secret. -func (k *K8s) CreateOrUpdateTLSSecret(namespace, name string, conf GeneratedPKI) (*corev1.Secret, error) { +func (k *K8s) CreateOrUpdateTLSSecret(ctx context.Context, namespace, name string, conf GeneratedPKI) (*corev1.Secret, error) { secret, err := k.GenerateTLSSecret(namespace, name, conf) if err != nil { return secret, err } - return k.CreateOrUpdateSecret(secret) + return k.CreateOrUpdateSecret(ctx, secret) } // DeleteSecret deletes a Kubernetes secret. 
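WaitForServiceAccount likewise drops its timeout parameter; the deadline now travels with the context. A caller sketch in which the one-minute budget is illustrative:

package example

import (
	"context"
	"time"

	"github.com/defenseunicorns/zarf/src/pkg/k8s"
	corev1 "k8s.io/api/core/v1"
)

// waitForServiceAccount waits up to one minute for the named service account.
func waitForServiceAccount(k *k8s.K8s, namespace, name string) (*corev1.ServiceAccount, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	return k.WaitForServiceAccount(ctx, namespace, name)
}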
-func (k *K8s) DeleteSecret(secret *corev1.Secret) error { +func (k *K8s) DeleteSecret(ctx context.Context, secret *corev1.Secret) error { namespaceSecrets := k.Clientset.CoreV1().Secrets(secret.Namespace) - err := namespaceSecrets.Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) + err := namespaceSecrets.Delete(ctx, secret.Name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("error deleting the secret: %w", err) } @@ -83,18 +83,18 @@ func (k *K8s) DeleteSecret(secret *corev1.Secret) error { } // CreateOrUpdateSecret creates or updates a Kubernetes secret. -func (k *K8s) CreateOrUpdateSecret(secret *corev1.Secret) (createdSecret *corev1.Secret, err error) { +func (k *K8s) CreateOrUpdateSecret(ctx context.Context, secret *corev1.Secret) (createdSecret *corev1.Secret, err error) { namespaceSecrets := k.Clientset.CoreV1().Secrets(secret.Namespace) - if _, err = k.GetSecret(secret.Namespace, secret.Name); err != nil { + if _, err = k.GetSecret(ctx, secret.Namespace, secret.Name); err != nil { // create the given secret - if createdSecret, err = namespaceSecrets.Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if createdSecret, err = namespaceSecrets.Create(ctx, secret, metav1.CreateOptions{}); err != nil { return createdSecret, fmt.Errorf("unable to create the secret: %w", err) } } else { // update the given secret - if createdSecret, err = namespaceSecrets.Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { + if createdSecret, err = namespaceSecrets.Update(ctx, secret, metav1.UpdateOptions{}); err != nil { return createdSecret, fmt.Errorf("unable to update the secret: %w", err) } } diff --git a/src/pkg/k8s/services.go b/src/pkg/k8s/services.go index 9b14f9a0d3..4757615796 100644 --- a/src/pkg/k8s/services.go +++ b/src/pkg/k8s/services.go @@ -27,12 +27,12 @@ type ServiceInfo struct { } // ReplaceService deletes and re-creates a service. -func (k *K8s) ReplaceService(service *corev1.Service) (*corev1.Service, error) { - if err := k.DeleteService(service.Namespace, service.Name); err != nil { +func (k *K8s) ReplaceService(ctx context.Context, service *corev1.Service) (*corev1.Service, error) { + if err := k.DeleteService(ctx, service.Namespace, service.Name); err != nil { return nil, err } - return k.CreateService(service) + return k.CreateService(ctx, service) } // GenerateService returns a K8s service struct without writing to the cluster. @@ -56,28 +56,28 @@ func (k *K8s) GenerateService(namespace, name string) *corev1.Service { } // DeleteService removes a service from the cluster by namespace and name. -func (k *K8s) DeleteService(namespace, name string) error { - return k.Clientset.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) +func (k *K8s) DeleteService(ctx context.Context, namespace, name string) error { + return k.Clientset.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{}) } // CreateService creates the given service in the cluster. 
-func (k *K8s) CreateService(service *corev1.Service) (*corev1.Service, error) { +func (k *K8s) CreateService(ctx context.Context, service *corev1.Service) (*corev1.Service, error) { createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().Services(service.Namespace).Create(context.TODO(), service, createOptions) + return k.Clientset.CoreV1().Services(service.Namespace).Create(ctx, service, createOptions) } // GetService returns a Kubernetes service resource in the provided namespace with the given name. -func (k *K8s) GetService(namespace, serviceName string) (*corev1.Service, error) { - return k.Clientset.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) +func (k *K8s) GetService(ctx context.Context, namespace, serviceName string) (*corev1.Service, error) { + return k.Clientset.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) } // GetServices returns a list of services in the provided namespace. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServices(namespace string) (*corev1.ServiceList, error) { - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) +func (k *K8s) GetServices(ctx context.Context, namespace string) (*corev1.ServiceList, error) { + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{}) } // GetServicesByLabel returns a list of matched services given a label and value. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServicesByLabel(namespace, label, value string) (*corev1.ServiceList, error) { +func (k *K8s) GetServicesByLabel(ctx context.Context, namespace, label, value string) (*corev1.ServiceList, error) { // Create the selector and add the requirement labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchLabels: Labels{ @@ -86,11 +86,11 @@ func (k *K8s) GetServicesByLabel(namespace, label, value string) (*corev1.Servic }) // Run the query with the selector and return as a ServiceList - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) } // GetServicesByLabelExists returns a list of matched services given a label. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServicesByLabelExists(namespace, label string) (*corev1.ServiceList, error) { +func (k *K8s) GetServicesByLabelExists(ctx context.Context, namespace, label string) (*corev1.ServiceList, error) { // Create the selector and add the requirement labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -100,12 +100,12 @@ func (k *K8s) GetServicesByLabelExists(namespace, label string) (*corev1.Service }) // Run the query with the selector and return as a ServiceList - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) } // ServiceInfoFromNodePortURL takes a nodePortURL and parses it to find the service info for connecting to the cluster. The string is expected to follow the following format: // Example nodePortURL: 127.0.0.1:{PORT}. 
-func (k *K8s) ServiceInfoFromNodePortURL(nodePortURL string) (*ServiceInfo, error) { +func (k *K8s) ServiceInfoFromNodePortURL(ctx context.Context, nodePortURL string) (*ServiceInfo, error) { // Attempt to parse as normal, if this fails add a scheme to the URL (docker registries don't use schemes) parsedURL, err := url.Parse(nodePortURL) if err != nil { @@ -130,7 +130,7 @@ func (k *K8s) ServiceInfoFromNodePortURL(nodePortURL string) (*ServiceInfo, erro return nil, fmt.Errorf("node port services should use the port range 30000-32767") } - services, err := k.GetServices("") + services, err := k.GetServices(ctx, "") if err != nil { return nil, err } diff --git a/src/pkg/k8s/tunnel.go b/src/pkg/k8s/tunnel.go index a4b910fccf..8618156eb0 100644 --- a/src/pkg/k8s/tunnel.go +++ b/src/pkg/k8s/tunnel.go @@ -7,6 +7,7 @@ package k8s // Forked from https://github.com/gruntwork-io/terratest/blob/v0.38.8/modules/k8s/tunnel.go import ( + "context" "fmt" "io" "net/http" @@ -79,22 +80,30 @@ func (tunnel *Tunnel) Wrap(function func() error) error { } // Connect will establish a tunnel to the specified target. -func (tunnel *Tunnel) Connect() (string, error) { - url, err := tunnel.establish() +func (tunnel *Tunnel) Connect(ctx context.Context) (string, error) { + url, err := tunnel.establish(ctx) // Try to establish the tunnel up to 3 times. if err != nil { tunnel.attempt++ + // If we have exceeded the number of attempts, exit with an error. if tunnel.attempt > 3 { return "", fmt.Errorf("unable to establish tunnel after 3 attempts: %w", err) } + // Otherwise, retry the connection but delay increasing intervals between attempts. delay := tunnel.attempt * 10 tunnel.kube.Log("%s", err.Error()) tunnel.kube.Log("Delay creating tunnel, waiting %d seconds...", delay) - time.Sleep(time.Duration(delay) * time.Second) - url, err = tunnel.Connect() + + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-time.After(time.Duration(delay) * time.Second): + } + + url, err = tunnel.Connect(ctx) if err != nil { return "", err } @@ -129,7 +138,7 @@ func (tunnel *Tunnel) Close() { } // establish opens a tunnel to a kubernetes resource, as specified by the provided tunnel struct. -func (tunnel *Tunnel) establish() (string, error) { +func (tunnel *Tunnel) establish(ctx context.Context) (string, error) { var err error // Track this locally as we may need to retry if the tunnel fails. @@ -163,7 +172,7 @@ func (tunnel *Tunnel) establish() (string, error) { tunnel.kube.Log(message) // Find the pod to port forward to - podName, err := tunnel.getAttachablePodForResource() + podName, err := tunnel.getAttachablePodForResource(ctx) if err != nil { return "", fmt.Errorf("unable to find pod attached to given resource: %w", err) } @@ -222,29 +231,33 @@ func (tunnel *Tunnel) establish() (string, error) { // getAttachablePodForResource will find a pod that can be port forwarded to the provided resource type and return // the name. -func (tunnel *Tunnel) getAttachablePodForResource() (string, error) { +func (tunnel *Tunnel) getAttachablePodForResource(ctx context.Context) (string, error) { switch tunnel.resourceType { case PodResource: return tunnel.resourceName, nil case SvcResource: - return tunnel.getAttachablePodForService() + return tunnel.getAttachablePodForService(ctx) default: return "", fmt.Errorf("unknown resource type: %s", tunnel.resourceType) } } // getAttachablePodForService will find an active pod associated with the Service and return the pod name. 
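Tunnel.Connect now threads the caller's context through its retry backoff, so a deadline caps the total time spent establishing the port-forward instead of each attempt sleeping unconditionally. A caller sketch; the 30-second budget is illustrative:

package example

import (
	"context"
	"time"

	"github.com/defenseunicorns/zarf/src/pkg/k8s"
)

// connectTunnel establishes an already-created tunnel, giving up once the
// 30-second budget is spent even if retry attempts remain.
func connectTunnel(tunnel *k8s.Tunnel) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return tunnel.Connect(ctx)
}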
-func (tunnel *Tunnel) getAttachablePodForService() (string, error) { - service, err := tunnel.kube.GetService(tunnel.namespace, tunnel.resourceName) +func (tunnel *Tunnel) getAttachablePodForService(ctx context.Context) (string, error) { + service, err := tunnel.kube.GetService(ctx, tunnel.namespace, tunnel.resourceName) if err != nil { return "", fmt.Errorf("unable to find the service: %w", err) } selectorLabelsOfPods := MakeLabels(service.Spec.Selector) - servicePods := tunnel.kube.WaitForPodsAndContainers(PodLookup{ - Namespace: tunnel.namespace, - Selector: selectorLabelsOfPods, - }, nil) + servicePods := tunnel.kube.WaitForPodsAndContainers( + ctx, + PodLookup{ + Namespace: tunnel.namespace, + Selector: selectorLabelsOfPods, + }, + nil, + ) if len(servicePods) < 1 { return "", fmt.Errorf("no pods found for service %s", tunnel.resourceName) diff --git a/src/pkg/packager/common.go b/src/pkg/packager/common.go index ba1335852e..9d9d26be1b 100644 --- a/src/pkg/packager/common.go +++ b/src/pkg/packager/common.go @@ -5,11 +5,11 @@ package packager import ( + "context" "errors" "fmt" "os" "strings" - "time" "slices" @@ -154,17 +154,17 @@ func (p *Packager) ClearTempPaths() { } // connectToCluster attempts to connect to a cluster if a connection is not already established -func (p *Packager) connectToCluster(timeout time.Duration) (err error) { +func (p *Packager) connectToCluster(ctx context.Context) (err error) { if p.isConnectedToCluster() { return nil } - p.cluster, err = cluster.NewClusterWithWait(timeout) + p.cluster, err = cluster.NewClusterWithWait(ctx) if err != nil { return err } - return p.attemptClusterChecks() + return p.attemptClusterChecks(ctx) } // isConnectedToCluster returns whether the current packager instance is connected to a cluster @@ -184,19 +184,19 @@ func (p *Packager) hasImages() bool { // attemptClusterChecks attempts to connect to the cluster and check for useful metadata and config mismatches. // NOTE: attemptClusterChecks should only return an error if there is a problem significant enough to halt a deployment, otherwise it should return nil and print a warning message. 
-func (p *Packager) attemptClusterChecks() (err error) { +func (p *Packager) attemptClusterChecks(ctx context.Context) (err error) { spinner := message.NewProgressSpinner("Gathering additional cluster information (if available)") defer spinner.Stop() // Check if the package has already been deployed and get its generation - if existingDeployedPackage, _ := p.cluster.GetDeployedPackage(p.cfg.Pkg.Metadata.Name); existingDeployedPackage != nil { + if existingDeployedPackage, _ := p.cluster.GetDeployedPackage(ctx, p.cfg.Pkg.Metadata.Name); existingDeployedPackage != nil { // If this package has been deployed before, increment the package generation within the secret p.generation = existingDeployedPackage.Generation + 1 } // Check the clusters architecture matches the package spec - if err := p.validatePackageArchitecture(); err != nil { + if err := p.validatePackageArchitecture(ctx); err != nil { if errors.Is(err, lang.ErrUnableToCheckArch) { message.Warnf("Unable to validate package architecture: %s", err.Error()) } else { @@ -205,7 +205,7 @@ func (p *Packager) attemptClusterChecks() (err error) { } // Check for any breaking changes between the initialized Zarf version and this CLI - if existingInitPackage, _ := p.cluster.GetDeployedPackage("init"); existingInitPackage != nil { + if existingInitPackage, _ := p.cluster.GetDeployedPackage(ctx, "init"); existingInitPackage != nil { // Use the build version instead of the metadata since this will support older Zarf versions deprecated.PrintBreakingChanges(existingInitPackage.Data.Build.Version) } @@ -216,13 +216,13 @@ func (p *Packager) attemptClusterChecks() (err error) { } // validatePackageArchitecture validates that the package architecture matches the target cluster architecture. -func (p *Packager) validatePackageArchitecture() error { +func (p *Packager) validatePackageArchitecture(ctx context.Context) error { // Ignore this check if we don't have a cluster connection, or the package contains no images if !p.isConnectedToCluster() || !p.hasImages() { return nil } - clusterArchitectures, err := p.cluster.GetArchitectures() + clusterArchitectures, err := p.cluster.GetArchitectures(ctx) if err != nil { return lang.ErrUnableToCheckArch } diff --git a/src/pkg/packager/common_test.go b/src/pkg/packager/common_test.go index 1f497415be..a445ec4e3b 100644 --- a/src/pkg/packager/common_test.go +++ b/src/pkg/packager/common_test.go @@ -1,6 +1,7 @@ package packager import ( + "context" "errors" "fmt" "testing" @@ -129,7 +130,7 @@ func TestValidatePackageArchitecture(t *testing.T) { return true, nodeList, nil }) - err := p.validatePackageArchitecture() + err := p.validatePackageArchitecture(context.TODO()) require.Equal(t, testCase.expectedError, err) }) diff --git a/src/pkg/packager/deploy.go b/src/pkg/packager/deploy.go index 7ec050fd4f..f7da48e34f 100644 --- a/src/pkg/packager/deploy.go +++ b/src/pkg/packager/deploy.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "os" "path/filepath" @@ -33,16 +34,16 @@ import ( corev1 "k8s.io/api/core/v1" ) -func (p *Packager) resetRegistryHPA() { +func (p *Packager) resetRegistryHPA(ctx context.Context) { if p.isConnectedToCluster() && p.hpaModified { - if err := p.cluster.EnableRegHPAScaleDown(); err != nil { + if err := p.cluster.EnableRegHPAScaleDown(ctx); err != nil { message.Debugf("unable to reenable the registry HPA scale down: %s", err.Error()) } } } // Deploy attempts to deploy the given PackageConfig. 
-func (p *Packager) Deploy() (err error) { +func (p *Packager) Deploy(ctx context.Context) (err error) { isInteractive := !config.CommonOptions.Confirm @@ -101,10 +102,10 @@ func (p *Packager) Deploy() (err error) { p.hpaModified = false p.connectStrings = make(types.ConnectStrings) // Reset registry HPA scale down whether an error occurs or not - defer p.resetRegistryHPA() + defer p.resetRegistryHPA(ctx) // Get a list of all the components we are deploying and actually deploy them - deployedComponents, err := p.deployComponents() + deployedComponents, err := p.deployComponents(ctx) if err != nil { return err } @@ -115,13 +116,13 @@ func (p *Packager) Deploy() (err error) { // Notify all the things about the successful deployment message.Successf("Zarf deployment complete") - p.printTablesForDeployment(deployedComponents) + p.printTablesForDeployment(ctx, deployedComponents) return nil } // deployComponents loops through a list of ZarfComponents and deploys them. -func (p *Packager) deployComponents() (deployedComponents []types.DeployedComponent, err error) { +func (p *Packager) deployComponents(ctx context.Context) (deployedComponents []types.DeployedComponent, err error) { // Generate a value template if p.valueTemplate, err = template.Generate(p.cfg); err != nil { return deployedComponents, fmt.Errorf("unable to generate the value template: %w", err) @@ -143,19 +144,14 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon // If this component requires a cluster, connect to one if component.RequiresCluster() { - timeout := cluster.DefaultTimeout - if p.cfg.Pkg.IsInitConfig() { - timeout = 5 * time.Minute - } - - if err := p.connectToCluster(timeout); err != nil { + if err := p.connectToCluster(ctx); err != nil { return deployedComponents, fmt.Errorf("unable to connect to the Kubernetes cluster: %w", err) } } // Ensure we don't overwrite any installedCharts data when updating the package secret if p.isConnectedToCluster() { - deployedComponent.InstalledCharts, err = p.cluster.GetInstalledChartsForComponent(p.cfg.Pkg.Metadata.Name, component) + deployedComponent.InstalledCharts, err = p.cluster.GetInstalledChartsForComponent(ctx, p.cfg.Pkg.Metadata.Name, component) if err != nil { message.Debugf("Unable to fetch installed Helm charts for component '%s': %s", component.Name, err.Error()) } @@ -166,7 +162,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon // Update the package secret to indicate that we are attempting to deploy this component if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %s: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -175,9 +171,9 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon var charts []types.InstalledChart var deployErr error if p.cfg.Pkg.IsInitConfig() { - charts, deployErr = p.deployInitComponent(component) + charts, deployErr = p.deployInitComponent(ctx, component) } else { - charts, deployErr = p.deployComponent(component, false /* keep img checksum */, false /* always push images */) + charts, deployErr = 
p.deployComponent(ctx, component, false /* keep img checksum */, false /* always push images */) } onDeploy := component.Actions.OnDeploy @@ -194,7 +190,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon // Update the package secret to indicate that we failed to deploy this component deployedComponents[idx].Status = types.ComponentStatusFailed if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -206,7 +202,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon deployedComponents[idx].InstalledCharts = charts deployedComponents[idx].Status = types.ComponentStatusSucceeded if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -220,7 +216,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon return deployedComponents, nil } -func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts []types.InstalledChart, err error) { +func (p *Packager) deployInitComponent(ctx context.Context, component types.ZarfComponent) (charts []types.InstalledChart, err error) { hasExternalRegistry := p.cfg.InitOpts.RegistryInfo.Address != "" isSeedRegistry := component.Name == "zarf-seed-registry" isRegistry := component.Name == "zarf-registry" @@ -234,7 +230,7 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] // Always init the state before the first component that requires the cluster (on most deployments, the zarf-seed-registry) if component.RequiresCluster() && p.cfg.State == nil { - err = p.cluster.InitZarfState(p.cfg.InitOpts) + err = p.cluster.InitZarfState(ctx, p.cfg.InitOpts) if err != nil { return charts, fmt.Errorf("unable to initialize Zarf state: %w", err) } @@ -252,17 +248,17 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] // Before deploying the seed registry, start the injector if isSeedRegistry { - p.cluster.StartInjectionMadness(p.layout.Base, p.layout.Images.Base, component.Images) + p.cluster.StartInjectionMadness(ctx, p.layout.Base, p.layout.Images.Base, component.Images) } - charts, err = p.deployComponent(component, isAgent /* skip img checksum if isAgent */, isSeedRegistry /* skip image push if isSeedRegistry */) + charts, err = p.deployComponent(ctx, component, isAgent /* skip img checksum if isAgent */, isSeedRegistry /* skip image push if isSeedRegistry */) if err != nil { return charts, err } // Do cleanup for when we inject the seed registry during initialization if isSeedRegistry { - if err := p.cluster.StopInjectionMadness(); 
err != nil { + if err := p.cluster.StopInjectionMadness(ctx); err != nil { return charts, fmt.Errorf("unable to seed the Zarf Registry: %w", err) } } @@ -271,7 +267,7 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] } // Deploy a Zarf Component. -func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum bool, noImgPush bool) (charts []types.InstalledChart, err error) { +func (p *Packager) deployComponent(ctx context.Context, component types.ZarfComponent, noImgChecksum bool, noImgPush bool) (charts []types.InstalledChart, err error) { // Toggles for general deploy operations componentPath := p.layout.Components.Dirs[component.Name] @@ -289,14 +285,14 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum if !p.valueTemplate.Ready() && component.RequiresCluster() { // Setup the state in the config and get the valuesTemplate - p.valueTemplate, err = p.setupStateValuesTemplate() + p.valueTemplate, err = p.setupStateValuesTemplate(ctx) if err != nil { return charts, err } // Disable the registry HPA scale down if we are deploying images and it is not already disabled if hasImages && !p.hpaModified && p.cfg.State.RegistryInfo.InternalRegistry { - if err := p.cluster.DisableRegHPAScaleDown(); err != nil { + if err := p.cluster.DisableRegHPAScaleDown(ctx); err != nil { message.Debugf("unable to disable the registry HPA scale down: %s", err.Error()) } else { p.hpaModified = true @@ -315,13 +311,13 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum } if hasImages { - if err := p.pushImagesToRegistry(component.Images, noImgChecksum); err != nil { + if err := p.pushImagesToRegistry(ctx, component.Images, noImgChecksum); err != nil { return charts, fmt.Errorf("unable to push images to the registry: %w", err) } } if hasRepos { - if err = p.pushReposToRepository(componentPath.Repos, component.Repos); err != nil { + if err = p.pushReposToRepository(ctx, componentPath.Repos, component.Repos); err != nil { return charts, fmt.Errorf("unable to push the repos to the repository: %w", err) } } @@ -332,7 +328,7 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum for idx, data := range component.DataInjections { waitGroup.Add(1) - go p.cluster.HandleDataInjection(&waitGroup, data, componentPath, idx) + go p.cluster.HandleDataInjection(ctx, &waitGroup, data, componentPath, idx) } } @@ -429,12 +425,12 @@ func (p *Packager) processComponentFiles(component types.ZarfComponent, pkgLocat } // setupStateValuesTemplate fetched the current ZarfState from the k8s cluster and generate a p.valueTemplate from the state values. 
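// For illustration only: a minimal, self-contained sketch of the
// context-threading pattern these hunks apply, not code from this repository.
// One caller-owned ctx bounds the whole deployment, and each cluster-bound
// step derives a shorter child context; the step type and timeouts below are
// assumptions made for the example.
package main

import (
	"context"
	"fmt"
	"time"
)

func deployAll(ctx context.Context, steps []func(context.Context) error) error {
	for i, step := range steps {
		// Each step gets a bounded child context; calling cancel releases the
		// timer even when the step returns early.
		stepCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
		err := step(stepCtx)
		cancel()
		if err != nil {
			return fmt.Errorf("step %d failed: %w", i, err)
		}
	}
	return nil
}

func main() {
	// The caller decides the overall budget; callees only consume it.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	_ = deployAll(ctx, nil)
}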
-func (p *Packager) setupStateValuesTemplate() (values *template.Values, err error) { +func (p *Packager) setupStateValuesTemplate(ctx context.Context) (values *template.Values, err error) { // If we are touching K8s, make sure we can talk to it once per deployment spinner := message.NewProgressSpinner("Loading the Zarf State from the Kubernetes cluster") defer spinner.Stop() - state, err := p.cluster.LoadZarfState() + state, err := p.cluster.LoadZarfState(ctx) // Return on error if we are not in YOLO mode if err != nil && !p.cfg.Pkg.Metadata.YOLO { return nil, fmt.Errorf("%s %w", lang.ErrLoadState, err) @@ -446,7 +442,7 @@ func (p *Packager) setupStateValuesTemplate() (values *template.Values, err erro // Try to create the zarf namespace spinner.Updatef("Creating the Zarf namespace") zarfNamespace := p.cluster.NewZarfManagedNamespace(cluster.ZarfNamespaceName) - if _, err := p.cluster.CreateNamespace(zarfNamespace); err != nil { + if _, err := p.cluster.CreateNamespace(ctx, zarfNamespace); err != nil { spinner.Fatalf(err, "Unable to create the zarf namespace") } } @@ -470,7 +466,7 @@ func (p *Packager) setupStateValuesTemplate() (values *template.Values, err erro } // Push all of the components images to the configured container registry. -func (p *Packager) pushImagesToRegistry(componentImages []string, noImgChecksum bool) error { +func (p *Packager) pushImagesToRegistry(ctx context.Context, componentImages []string, noImgChecksum bool) error { if len(componentImages) == 0 { return nil } @@ -496,12 +492,12 @@ func (p *Packager) pushImagesToRegistry(componentImages []string, noImgChecksum } return helpers.Retry(func() error { - return imgConfig.PushToZarfRegistry() + return imgConfig.PushToZarfRegistry(ctx) }, p.cfg.PkgOpts.Retries, 5*time.Second, message.Warnf) } // Push all of the components git repos to the configured git server. 
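// For illustration only: a hedged sketch, separate from this patch, of how a
// retry loop can honor the same ctx passed to the wrapped push call, so a
// cancelled or expired context stops further attempts rather than only
// failing the attempt in flight. The attempt count and delay are assumptions.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func retryWithContext(ctx context.Context, attempts int, delay time.Duration, fn func(context.Context) error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		if lastErr = fn(ctx); lastErr == nil {
			return nil
		}
		// Give up as soon as the caller's context is done.
		select {
		case <-ctx.Done():
			return errors.Join(lastErr, ctx.Err())
		case <-time.After(delay):
		}
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, lastErr)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	push := func(context.Context) error { return nil } // stand-in for a registry push
	fmt.Println(retryWithContext(ctx, 3, 5*time.Second, push))
}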
-func (p *Packager) pushReposToRepository(reposPath string, repos []string) error { +func (p *Packager) pushReposToRepository(ctx context.Context, reposPath string, repos []string) error { for _, repoURL := range repos { // Create an anonymous function to push the repo to the Zarf git server tryPush := func() error { @@ -514,7 +510,7 @@ func (p *Packager) pushReposToRepository(reposPath string, repos []string) error // If this is a service (svcInfo is not nil), create a port-forward tunnel to that resource if svcInfo != nil { if !p.isConnectedToCluster() { - err := p.connectToCluster(5 * time.Second) + err := p.connectToCluster(ctx) if err != nil { return err } @@ -525,7 +521,7 @@ func (p *Packager) pushReposToRepository(reposPath string, repos []string) error return err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return err } @@ -648,7 +644,7 @@ func (p *Packager) installChartAndManifests(componentPaths *layout.ComponentPath return installedCharts, nil } -func (p *Packager) printTablesForDeployment(componentsToDeploy []types.DeployedComponent) { +func (p *Packager) printTablesForDeployment(ctx context.Context, componentsToDeploy []types.DeployedComponent) { // If not init config, print the application connection table if !p.cfg.Pkg.IsInitConfig() { @@ -656,7 +652,7 @@ func (p *Packager) printTablesForDeployment(componentsToDeploy []types.DeployedC } else { if p.cluster != nil { // Grab a fresh copy of the state (if we are able) to print the most up-to-date version of the creds - freshState, err := p.cluster.LoadZarfState() + freshState, err := p.cluster.LoadZarfState(ctx) if err != nil { freshState = p.cfg.State } diff --git a/src/pkg/packager/dev.go b/src/pkg/packager/dev.go index 1679933cfe..dd2186d5ff 100644 --- a/src/pkg/packager/dev.go +++ b/src/pkg/packager/dev.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "os" "runtime" @@ -21,7 +22,7 @@ import ( ) // DevDeploy creates + deploys a package in one shot -func (p *Packager) DevDeploy() error { +func (p *Packager) DevDeploy(ctx context.Context) error { config.CommonOptions.Confirm = true p.cfg.CreateOpts.SkipSBOM = !p.cfg.CreateOpts.NoYOLO @@ -83,11 +84,11 @@ func (p *Packager) DevDeploy() error { } else { p.hpaModified = false // Reset registry HPA scale down whether an error occurs or not - defer p.resetRegistryHPA() + defer p.resetRegistryHPA(ctx) } // Get a list of all the components we are deploying and actually deploy them - deployedComponents, err := p.deployComponents() + deployedComponents, err := p.deployComponents(ctx) if err != nil { return err } diff --git a/src/pkg/packager/mirror.go b/src/pkg/packager/mirror.go index f8f90fad55..1341f527ee 100644 --- a/src/pkg/packager/mirror.go +++ b/src/pkg/packager/mirror.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "runtime" "strings" @@ -16,7 +17,7 @@ import ( ) // Mirror pulls resources from a package (images, git repositories, etc) and pushes them to remotes in the air gap without deploying them -func (p *Packager) Mirror() (err error) { +func (p *Packager) Mirror(ctx context.Context) (err error) { filter := filters.Combine( filters.ByLocalOS(runtime.GOOS), filters.BySelectState(p.cfg.PkgOpts.OptionalComponents), @@ -46,7 +47,7 @@ func (p *Packager) Mirror() (err error) { } for _, component := range p.cfg.Pkg.Components { - if err := p.mirrorComponent(component); err != nil { + if err := p.mirrorComponent(ctx, component); err != nil { return err } } @@ -54,7 +55,7 @@ func (p *Packager) Mirror() (err error) { } // 
mirrorComponent mirrors a Zarf Component. -func (p *Packager) mirrorComponent(component types.ZarfComponent) error { +func (p *Packager) mirrorComponent(ctx context.Context, component types.ZarfComponent) error { componentPaths := p.layout.Components.Dirs[component.Name] // All components now require a name @@ -64,13 +65,13 @@ func (p *Packager) mirrorComponent(component types.ZarfComponent) error { hasRepos := len(component.Repos) > 0 if hasImages { - if err := p.pushImagesToRegistry(component.Images, p.cfg.MirrorOpts.NoImgChecksum); err != nil { + if err := p.pushImagesToRegistry(ctx, component.Images, p.cfg.MirrorOpts.NoImgChecksum); err != nil { return fmt.Errorf("unable to push images to the registry: %w", err) } } if hasRepos { - if err := p.pushReposToRepository(componentPaths.Repos, component.Repos); err != nil { + if err := p.pushReposToRepository(ctx, componentPaths.Repos, component.Repos); err != nil { return fmt.Errorf("unable to push the repos to the repository: %w", err) } } diff --git a/src/pkg/packager/remove.go b/src/pkg/packager/remove.go index 122e811b10..fcc4de5aaa 100644 --- a/src/pkg/packager/remove.go +++ b/src/pkg/packager/remove.go @@ -5,6 +5,7 @@ package packager import ( + "context" "encoding/json" "errors" "fmt" @@ -26,7 +27,7 @@ import ( ) // Remove removes a package that was already deployed onto a cluster, uninstalling all installed helm charts. -func (p *Packager) Remove() (err error) { +func (p *Packager) Remove(ctx context.Context) (err error) { _, isClusterSource := p.source.(*sources.ClusterSource) if isClusterSource { p.cluster = p.source.(*sources.ClusterSource).Cluster @@ -70,11 +71,11 @@ func (p *Packager) Remove() (err error) { deployedPackage := &types.DeployedPackage{} if packageRequiresCluster { - err = p.connectToCluster(cluster.DefaultTimeout) + err = p.connectToCluster(ctx) if err != nil { return err } - deployedPackage, err = p.cluster.GetDeployedPackage(packageName) + deployedPackage, err = p.cluster.GetDeployedPackage(ctx, packageName) if err != nil { return fmt.Errorf("unable to load the secret for the package we are attempting to remove: %s", err.Error()) } @@ -93,7 +94,7 @@ func (p *Packager) Remove() (err error) { continue } - if deployedPackage, err = p.removeComponent(deployedPackage, dc, spinner); err != nil { + if deployedPackage, err = p.removeComponent(ctx, deployedPackage, dc, spinner); err != nil { return fmt.Errorf("unable to remove the component '%s': %w", dc.Name, err) } } @@ -101,7 +102,7 @@ func (p *Packager) Remove() (err error) { return nil } -func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { +func (p *Packager) updatePackageSecret(ctx context.Context, deployedPackage types.DeployedPackage) { // Only attempt to update the package secret if we are actually connected to a cluster if p.cluster != nil { secretName := config.ZarfPackagePrefix + deployedPackage.Name @@ -113,7 +114,7 @@ func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { newPackageSecretData, _ := json.Marshal(deployedPackage) newPackageSecret.Data["data"] = newPackageSecretData - _, err := p.cluster.CreateOrUpdateSecret(newPackageSecret) + _, err := p.cluster.CreateOrUpdateSecret(ctx, newPackageSecret) // We warn and ignore errors because we may have removed the cluster that this package was inside of if err != nil { @@ -122,7 +123,7 @@ func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { } } -func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, 
deployedComponent types.DeployedComponent, spinner *message.Spinner) (*types.DeployedPackage, error) { +func (p *Packager) removeComponent(ctx context.Context, deployedPackage *types.DeployedPackage, deployedComponent types.DeployedComponent, spinner *message.Spinner) (*types.DeployedPackage, error) { components := deployedPackage.Data.Components c := helpers.Find(components, func(t types.ZarfComponent) bool { @@ -162,7 +163,7 @@ func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, deplo deployedComponent.InstalledCharts = helpers.RemoveMatches(deployedComponent.InstalledCharts, func(t types.InstalledChart) bool { return t.ChartName == chart.ChartName }) - p.updatePackageSecret(*deployedPackage) + p.updatePackageSecret(ctx, *deployedPackage) } if err := actions.Run(p.cfg, onRemove.Defaults, onRemove.After, nil); err != nil { @@ -184,19 +185,19 @@ func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, deplo secretName := config.ZarfPackagePrefix + deployedPackage.Name // All the installed components were deleted, therefore this package is no longer actually deployed - packageSecret, err := p.cluster.GetSecret(cluster.ZarfNamespaceName, secretName) + packageSecret, err := p.cluster.GetSecret(ctx, cluster.ZarfNamespaceName, secretName) // We warn and ignore errors because we may have removed the cluster that this package was inside of if err != nil { message.Warnf("Unable to delete the '%s' package secret: '%s' (this may be normal if the cluster was removed)", secretName, err.Error()) } else { - err = p.cluster.DeleteSecret(packageSecret) + err = p.cluster.DeleteSecret(ctx, packageSecret) if err != nil { message.Warnf("Unable to delete the '%s' package secret: '%s' (this may be normal if the cluster was removed)", secretName, err.Error()) } } } else { - p.updatePackageSecret(*deployedPackage) + p.updatePackageSecret(ctx, *deployedPackage) } return deployedPackage, nil diff --git a/src/pkg/packager/sources/cluster.go b/src/pkg/packager/sources/cluster.go index 1d506c6937..088b619e13 100644 --- a/src/pkg/packager/sources/cluster.go +++ b/src/pkg/packager/sources/cluster.go @@ -5,7 +5,9 @@ package sources import ( + "context" "fmt" + "time" "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/internal/packager/validate" @@ -26,7 +28,11 @@ func NewClusterSource(pkgOpts *types.ZarfPackageOptions) (PackageSource, error) if !validate.IsLowercaseNumberHyphenNoStartHyphen(pkgOpts.PackageSource) { return nil, fmt.Errorf("invalid package name %q", pkgOpts.PackageSource) } - cluster, err := cluster.NewClusterWithWait(cluster.DefaultTimeout) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + cluster, err := cluster.NewClusterWithWait(ctx) if err != nil { return nil, err } @@ -55,7 +61,10 @@ func (s *ClusterSource) Collect(_ string) (string, error) { // LoadPackageMetadata loads package metadata from a cluster. 
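// For illustration only: a sketch, not the project's API, of the pattern used
// in the cluster source below: when an interface signature cannot grow a ctx
// parameter, the implementation derives its own bounded context internally
// instead of making cluster calls with no deadline. The interface, type names,
// and timeout here are assumptions for the example.
package main

import (
	"context"
	"fmt"
	"time"
)

type metadataSource interface {
	Metadata() (string, error) // signature fixed elsewhere, so no ctx parameter
}

type clusterSource struct{}

func (clusterSource) Metadata() (string, error) {
	// Use a local, bounded context rather than an unbounded Background().
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return fetch(ctx, "example-package")
}

func fetch(ctx context.Context, name string) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
		return "metadata for " + name, nil
	}
}

func main() {
	var s metadataSource = clusterSource{}
	fmt.Println(s.Metadata())
}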
func (s *ClusterSource) LoadPackageMetadata(dst *layout.PackagePaths, _ bool, _ bool) (types.ZarfPackage, []string, error) { - dpkg, err := s.GetDeployedPackage(s.PackageSource) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dpkg, err := s.GetDeployedPackage(ctx, s.PackageSource) if err != nil { return types.ZarfPackage{}, nil, err } diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go index 4fe560f6a8..c540629e20 100644 --- a/src/test/e2e/21_connect_creds_test.go +++ b/src/test/e2e/21_connect_creds_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "crypto/tls" "fmt" "io" @@ -27,7 +28,9 @@ func TestConnectAndCreds(t *testing.T) { prevAgentSecretData, _, err := e2e.Kubectl("get", "secret", "agent-hook-tls", "-n", "zarf", "-o", "jsonpath={.data}") require.NoError(t, err) - connectToZarfServices(t) + ctx := context.TODO() + + connectToZarfServices(t, ctx) stdOut, stdErr, err := e2e.Zarf("tools", "update-creds", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -36,7 +39,7 @@ func TestConnectAndCreds(t *testing.T) { require.NoError(t, err) require.NotEqual(t, prevAgentSecretData, newAgentSecretData, "agent secrets should not be the same") - connectToZarfServices(t) + connectToZarfServices(t, ctx) stdOut, stdErr, err = e2e.Zarf("package", "remove", "init", "--components=logging", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -68,7 +71,7 @@ func TestMetrics(t *testing.T) { tunnel, err := c.NewTunnel("zarf", "svc", "agent-hook", "", 8888, 8443) require.NoError(t, err) - _, err = tunnel.Connect() + _, err = tunnel.Connect(context.TODO()) require.NoError(t, err) defer tunnel.Close() @@ -98,7 +101,7 @@ func TestMetrics(t *testing.T) { require.Equal(t, 200, resp.StatusCode) } -func connectToZarfServices(t *testing.T) { +func connectToZarfServices(t *testing.T, ctx context.Context) { // Make the Registry contains the images we expect stdOut, stdErr, err := e2e.Zarf("tools", "registry", "catalog") require.NoError(t, err, stdOut, stdErr) @@ -129,7 +132,7 @@ func connectToZarfServices(t *testing.T) { // Connect to Gitea c, err := cluster.NewCluster() require.NoError(t, err) - tunnelGit, err := c.Connect(cluster.ZarfGit) + tunnelGit, err := c.Connect(ctx, cluster.ZarfGit) require.NoError(t, err) defer tunnelGit.Close() @@ -150,7 +153,7 @@ func connectToZarfServices(t *testing.T) { // Connect to the Logging Stack c, err = cluster.NewCluster() require.NoError(t, err) - tunnelLog, err := c.Connect(cluster.ZarfLogging) + tunnelLog, err := c.Connect(ctx, cluster.ZarfLogging) require.NoError(t, err) defer tunnelLog.Close() diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index f83bd6ba05..f9e9ccc2fe 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -35,13 +36,14 @@ func TestGit(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnelGit, err := c.Connect(cluster.ZarfGit) + ctx := context.TODO() + tunnelGit, err := c.Connect(ctx, cluster.ZarfGit) require.NoError(t, err) defer tunnelGit.Close() testGitServerConnect(t, tunnelGit.HTTPEndpoint()) - testGitServerReadOnly(t, tunnelGit.HTTPEndpoint()) - testGitServerTagAndHash(t, tunnelGit.HTTPEndpoint()) + testGitServerReadOnly(t, ctx, tunnelGit.HTTPEndpoint()) + testGitServerTagAndHash(t, ctx, tunnelGit.HTTPEndpoint()) } func TestGitOpsFlux(t *testing.T) 
{ @@ -65,9 +67,9 @@ func testGitServerConnect(t *testing.T, gitURL string) { require.Equal(t, 200, resp.StatusCode) } -func testGitServerReadOnly(t *testing.T, gitURL string) { +func testGitServerReadOnly(t *testing.T, ctx context.Context, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie().LoadZarfState() + state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err) gitCfg := git.New(state.GitServer) @@ -88,9 +90,9 @@ func testGitServerReadOnly(t *testing.T, gitURL string) { require.True(t, permissionsMap["pull"].(bool)) } -func testGitServerTagAndHash(t *testing.T, gitURL string) { +func testGitServerTagAndHash(t *testing.T, ctx context.Context, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie().LoadZarfState() + state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err, "Failed to load Zarf state") repoName := "zarf-public-test-2469062884" diff --git a/src/test/e2e/23_data_injection_test.go b/src/test/e2e/23_data_injection_test.go index ee65c49c0a..ce986859ee 100644 --- a/src/test/e2e/23_data_injection_test.go +++ b/src/test/e2e/23_data_injection_test.go @@ -42,7 +42,7 @@ func TestDataInjection(t *testing.T) { // need target to equal svc that we are trying to connect to call checkForZarfConnectLabel c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect("kiwix") + tunnel, err := c.Connect(context.TODO(), "kiwix") require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/e2e/26_simple_packages_test.go b/src/test/e2e/26_simple_packages_test.go index 996982d383..25fa7115e9 100644 --- a/src/test/e2e/26_simple_packages_test.go +++ b/src/test/e2e/26_simple_packages_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "fmt" "net/http" "path/filepath" @@ -26,7 +27,7 @@ func TestDosGames(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect("doom") + tunnel, err := c.Connect(context.TODO(), "doom") require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/e2e/99_yolo_test.go b/src/test/e2e/99_yolo_test.go index a4044c53a9..1c843501a3 100644 --- a/src/test/e2e/99_yolo_test.go +++ b/src/test/e2e/99_yolo_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "fmt" "net/http" "testing" @@ -35,7 +36,7 @@ func TestYOLOMode(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect("doom") + tunnel, err := c.Connect(context.TODO(), "doom") require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/external/ext_in_cluster_test.go b/src/test/external/ext_in_cluster_test.go index a0a4aae7c4..3338b5474f 100644 --- a/src/test/external/ext_in_cluster_test.go +++ b/src/test/external/ext_in_cluster_test.go @@ -81,10 +81,12 @@ func (suite *ExtInClusterTestSuite) Test_0_Mirror() { c, err := cluster.NewCluster() suite.NoError(err) + ctx := context.TODO() + // Check that the registry contains the images we want tunnelReg, err := c.NewTunnel("external-registry", "svc", "external-registry-docker-registry", "", 0, 5000) suite.NoError(err) - _, err = tunnelReg.Connect() + _, err = tunnelReg.Connect(ctx) suite.NoError(err) defer tunnelReg.Close() @@ -101,7 +103,7 @@ func (suite *ExtInClusterTestSuite) Test_0_Mirror() { tunnelGit, err := c.NewTunnel("git-server", "svc", "gitea-http", "", 0, 3000) suite.NoError(err) - _, err = tunnelGit.Connect() + _, err = tunnelGit.Connect(ctx) suite.NoError(err) defer tunnelGit.Close() diff --git 
a/src/test/external/ext_out_cluster_test.go b/src/test/external/ext_out_cluster_test.go index 7d086bc137..2fd4178cf3 100644 --- a/src/test/external/ext_out_cluster_test.go +++ b/src/test/external/ext_out_cluster_test.go @@ -174,7 +174,8 @@ func (suite *ExtOutClusterTestSuite) Test_2_AuthToPrivateHelmChart() { URL: chartURL, } repoFile.Add(entry) - utils.WriteYaml(repoPath, repoFile, helpers.ReadWriteUser) + err = utils.WriteYaml(repoPath, repoFile, helpers.ReadWriteUser) + suite.NoError(err) err = exec.CmdWithPrint(zarfBinPath, findImageArgs...) suite.NoError(err, "Unable to find images, helm auth likely failed") @@ -192,7 +193,8 @@ func (suite *ExtOutClusterTestSuite) createHelmChartInGitea(baseURL string, user podinfoTarballPath := filepath.Join(tempDir, fmt.Sprintf("podinfo-%s.tgz", podInfoVersion)) suite.NoError(err, "Unable to package chart") - utils.DownloadToFile(fmt.Sprintf("https://stefanprodan.github.io/podinfo/podinfo-%s.tgz", podInfoVersion), podinfoTarballPath, "") + err = utils.DownloadToFile(fmt.Sprintf("https://stefanprodan.github.io/podinfo/podinfo-%s.tgz", podInfoVersion), podinfoTarballPath, "") + suite.NoError(err) url := fmt.Sprintf("%s/api/packages/%s/helm/api/charts", baseURL, username) file, err := os.Open(podinfoTarballPath) From c74e6347a915a34fae4122a9839c3adde66b9504 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 26 Mar 2024 17:19:55 -0500 Subject: [PATCH 02/62] Simplify getImagesAndNodesForInjection --- src/pkg/cluster/injector.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/pkg/cluster/injector.go b/src/pkg/cluster/injector.go index 4a7c872ee5..831feb259a 100644 --- a/src/pkg/cluster/injector.go +++ b/src/pkg/cluster/injector.go @@ -441,7 +441,7 @@ func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeM select { case <-ctx.Done(): return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) - default: + case <-time.After(2 * time.Second): pods, err := c.GetPods(ctx, corev1.NamespaceAll) if err != nil { return nil, fmt.Errorf("unable to get the list of pods in the cluster: %w", err) @@ -487,12 +487,6 @@ func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeM } c.Log("No images found on any node. 
Retrying...") - - select { - case <-ctx.Done(): - return nil, fmt.Errorf("get image list cancelled or timed out while waiting to retry: %w", ctx.Err()) - case <-time.After(2 * time.Second): - } } } } From 3cdec09654d63e8d4dea299a49131f264f3cfddf Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 26 Mar 2024 17:21:12 -0500 Subject: [PATCH 03/62] Make ctx the first param in testConnection --- src/extensions/bigbang/test/bigbang_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go index be09a3000b..db929c5410 100644 --- a/src/extensions/bigbang/test/bigbang_test.go +++ b/src/extensions/bigbang/test/bigbang_test.go @@ -117,10 +117,10 @@ func TestReleases(t *testing.T) { require.NoError(t, err, stdOut, stdErr) // Test connectivity to Twistlock - testConnection(t, context.TODO()) + testConnection(context.TODO(), t) } -func testConnection(t *testing.T, ctx context.Context) { +func testConnection(ctx context.Context, t *testing.T) { // Establish the tunnel config c, err := cluster.NewCluster() require.NoError(t, err) From 57c70a69269151668e593fd00d0b9c94d0802119 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 26 Mar 2024 17:31:27 -0500 Subject: [PATCH 04/62] Make ctx the first param in connectToZarfServices --- src/test/e2e/21_connect_creds_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go index c540629e20..f742e9e80a 100644 --- a/src/test/e2e/21_connect_creds_test.go +++ b/src/test/e2e/21_connect_creds_test.go @@ -30,7 +30,7 @@ func TestConnectAndCreds(t *testing.T) { ctx := context.TODO() - connectToZarfServices(t, ctx) + connectToZarfServices(ctx, t) stdOut, stdErr, err := e2e.Zarf("tools", "update-creds", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -39,7 +39,7 @@ func TestConnectAndCreds(t *testing.T) { require.NoError(t, err) require.NotEqual(t, prevAgentSecretData, newAgentSecretData, "agent secrets should not be the same") - connectToZarfServices(t, ctx) + connectToZarfServices(ctx, t) stdOut, stdErr, err = e2e.Zarf("package", "remove", "init", "--components=logging", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -101,7 +101,7 @@ func TestMetrics(t *testing.T) { require.Equal(t, 200, resp.StatusCode) } -func connectToZarfServices(t *testing.T, ctx context.Context) { +func connectToZarfServices(ctx context.Context, t *testing.T) { // Make the Registry contains the images we expect stdOut, stdErr, err := e2e.Zarf("tools", "registry", "catalog") require.NoError(t, err, stdOut, stdErr) From 418dbeb4af06e8106808855931d5816bc49dfce6 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 26 Mar 2024 17:33:30 -0500 Subject: [PATCH 05/62] Make ctx the first param in testGitServerReadOnly and testGitServerTagAndHash --- src/test/e2e/22_git_and_gitops_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index f9e9ccc2fe..1105d4f716 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -42,8 +42,8 @@ func TestGit(t *testing.T) { defer tunnelGit.Close() testGitServerConnect(t, tunnelGit.HTTPEndpoint()) - testGitServerReadOnly(t, ctx, tunnelGit.HTTPEndpoint()) - testGitServerTagAndHash(t, ctx, tunnelGit.HTTPEndpoint()) + testGitServerReadOnly(ctx, t, tunnelGit.HTTPEndpoint()) + 
testGitServerTagAndHash(ctx, t, tunnelGit.HTTPEndpoint()) } func TestGitOpsFlux(t *testing.T) { @@ -67,7 +67,7 @@ func testGitServerConnect(t *testing.T, gitURL string) { require.Equal(t, 200, resp.StatusCode) } -func testGitServerReadOnly(t *testing.T, ctx context.Context, gitURL string) { +func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err) @@ -90,7 +90,7 @@ func testGitServerReadOnly(t *testing.T, ctx context.Context, gitURL string) { require.True(t, permissionsMap["pull"].(bool)) } -func testGitServerTagAndHash(t *testing.T, ctx context.Context, gitURL string) { +func testGitServerTagAndHash(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err, "Failed to load Zarf state") From f074a951ffaf99effec8a94e2b8c8c7071ff9002 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 09:47:36 -0500 Subject: [PATCH 06/62] Fix component webhooks test --- src/test/e2e/33_component_webhooks_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/e2e/33_component_webhooks_test.go b/src/test/e2e/33_component_webhooks_test.go index 349e0d0770..cbb8321b08 100644 --- a/src/test/e2e/33_component_webhooks_test.go +++ b/src/test/e2e/33_component_webhooks_test.go @@ -27,12 +27,12 @@ func TestComponentWebhooks(t *testing.T) { gamesPath := fmt.Sprintf("build/zarf-package-dos-games-%s-1.0.0.tar.zst", e2e.Arch) stdOut, stdErr, err = e2e.Zarf("package", "deploy", gamesPath, "--confirm") require.NoError(t, err, stdOut, stdErr) - require.Contains(t, stdErr, "Waiting for webhook 'test-webhook' to complete for component 'baseline'") + require.Contains(t, stdErr, "Waiting for webhook \"test-webhook\" to complete for component \"baseline\"") // Ensure package deployments with the '--skip-webhooks' flag do not wait on webhooks to complete. stdOut, stdErr, err = e2e.Zarf("package", "deploy", gamesPath, "--skip-webhooks", "--confirm") require.NoError(t, err, stdOut, stdErr) - require.NotContains(t, stdErr, "Waiting for webhook 'test-webhook' to complete for component 'baseline'") + require.NotContains(t, stdErr, "Waiting for webhook \"test-webhook\" to complete for component \"baseline\"") // Remove the Pepr webhook package. 
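// For illustration only: a brief sketch of the ctx-first argument order these
// patches adopt for test helpers (context.Context first, then *testing.T, then
// the remaining arguments), which matches common Go style for context
// parameters. The helper name, timeout, and URL handling are assumptions.
package example

import (
	"context"
	"net/http"
	"testing"
	"time"
)

func checkEndpoint(ctx context.Context, t *testing.T, url string) {
	t.Helper()
	// Bound the request with a child context derived from the test's ctx.
	reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, url, nil)
	if err != nil {
		t.Fatalf("building request: %v", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200, got %d", resp.StatusCode)
	}
}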
stdOut, stdErr, err = e2e.Zarf("package", "remove", "component-webhooks", "--confirm") From d336924f0f43c25107401995b2f589c76c2d0479 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:19:51 -0500 Subject: [PATCH 07/62] Set ctx timeout for connectToCluster to 30 seconds --- src/pkg/packager/deploy.go | 12 ++++++++++-- src/pkg/packager/remove.go | 4 +++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/pkg/packager/deploy.go b/src/pkg/packager/deploy.go index f7da48e34f..4d60fbfc6a 100644 --- a/src/pkg/packager/deploy.go +++ b/src/pkg/packager/deploy.go @@ -144,7 +144,13 @@ func (p *Packager) deployComponents(ctx context.Context) (deployedComponents []t // If this component requires a cluster, connect to one if component.RequiresCluster() { - if err := p.connectToCluster(ctx); err != nil { + timeout := cluster.DefaultTimeout + if p.cfg.Pkg.IsInitConfig() { + timeout = 5 * time.Minute + } + connectCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + if err := p.connectToCluster(connectCtx); err != nil { return deployedComponents, fmt.Errorf("unable to connect to the Kubernetes cluster: %w", err) } } @@ -510,7 +516,9 @@ func (p *Packager) pushReposToRepository(ctx context.Context, reposPath string, // If this is a service (svcInfo is not nil), create a port-forward tunnel to that resource if svcInfo != nil { if !p.isConnectedToCluster() { - err := p.connectToCluster(ctx) + connectCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) + defer cancel() + err := p.connectToCluster(connectCtx) if err != nil { return err } diff --git a/src/pkg/packager/remove.go b/src/pkg/packager/remove.go index fcc4de5aaa..fe475744c1 100644 --- a/src/pkg/packager/remove.go +++ b/src/pkg/packager/remove.go @@ -71,7 +71,9 @@ func (p *Packager) Remove(ctx context.Context) (err error) { deployedPackage := &types.DeployedPackage{} if packageRequiresCluster { - err = p.connectToCluster(ctx) + connectCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) + defer cancel() + err = p.connectToCluster(connectCtx) if err != nil { return err } From e96a397061ec6d103e68cc46e115f21299721c9c Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:22:10 -0500 Subject: [PATCH 08/62] Go back to 5 second timeout to connect to cluster in pushReposToRepository --- src/pkg/packager/deploy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pkg/packager/deploy.go b/src/pkg/packager/deploy.go index 4d60fbfc6a..70de6b9bc3 100644 --- a/src/pkg/packager/deploy.go +++ b/src/pkg/packager/deploy.go @@ -516,7 +516,7 @@ func (p *Packager) pushReposToRepository(ctx context.Context, reposPath string, // If this is a service (svcInfo is not nil), create a port-forward tunnel to that resource if svcInfo != nil { if !p.isConnectedToCluster() { - connectCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) + connectCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() err := p.connectToCluster(connectCtx) if err != nil { From 59c4282958fb35c99bb6eeb8c40fd5e35c9289e2 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:24:26 -0500 Subject: [PATCH 09/62] Use DefaultTimeout in NewClusterSource --- src/pkg/packager/sources/cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pkg/packager/sources/cluster.go b/src/pkg/packager/sources/cluster.go index 088b619e13..f236b71c0c 100644 --- a/src/pkg/packager/sources/cluster.go +++ b/src/pkg/packager/sources/cluster.go @@ -29,7 
+29,7 @@ func NewClusterSource(pkgOpts *types.ZarfPackageOptions) (PackageSource, error) return nil, fmt.Errorf("invalid package name %q", pkgOpts.PackageSource) } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() cluster, err := cluster.NewClusterWithWait(ctx) From 2ddf7f90424890fca57a0d8207e3c6e7f0952b1a Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:25:21 -0500 Subject: [PATCH 10/62] Use DefaultTimeout in LoadPackageMetadata --- src/pkg/packager/sources/cluster.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pkg/packager/sources/cluster.go b/src/pkg/packager/sources/cluster.go index f236b71c0c..80f6b6007a 100644 --- a/src/pkg/packager/sources/cluster.go +++ b/src/pkg/packager/sources/cluster.go @@ -7,7 +7,6 @@ package sources import ( "context" "fmt" - "time" "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/internal/packager/validate" @@ -61,7 +60,7 @@ func (s *ClusterSource) Collect(_ string) (string, error) { // LoadPackageMetadata loads package metadata from a cluster. func (s *ClusterSource) LoadPackageMetadata(dst *layout.PackagePaths, _ bool, _ bool) (types.ZarfPackage, []string, error) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() dpkg, err := s.GetDeployedPackage(ctx, s.PackageSource) From 323c5e0d7d4a9bde53c2ef489cde4dafc46a2cd1 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:32:42 -0500 Subject: [PATCH 11/62] Replace 30 second timeouts with DefaultTimeout --- src/cmd/connect.go | 10 ++++++---- src/cmd/destroy.go | 2 +- src/cmd/internal.go | 7 +++---- src/cmd/package.go | 6 +++--- src/cmd/tools/crane.go | 7 +++---- src/cmd/tools/helm/repo_add.go | 3 ++- src/cmd/tools/zarf.go | 5 ++--- src/internal/packager/helm/post-render.go | 4 ++-- src/pkg/k8s/pods.go | 3 ++- 9 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index 991470ee89..de21c62fc6 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -10,7 +10,6 @@ import ( "os" "os/signal" "syscall" - "time" "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/config/lang" @@ -45,7 +44,7 @@ var ( spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error()) } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() var tunnel *k8s.Tunnel @@ -96,9 +95,12 @@ var ( Aliases: []string{"l"}, Short: lang.CmdConnectListShort, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() - cluster.NewClusterOrDie(ctx).PrintConnectTable(ctx) + err := cluster.NewClusterOrDie(ctx).PrintConnectTable(ctx) + if err != nil { + message.Fatal(err, err.Error()) + } }, } ) diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index 56784b2df5..643c9c0f93 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -34,7 +34,7 @@ var destroyCmd = &cobra.Command{ ctxLong, cancelLong := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelLong() - ctxShort, cancelShort := context.WithTimeout(context.Background(), 30*time.Second) + 
ctxShort, cancelShort := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancelShort() c, err := cluster.NewClusterWithWait(ctxShort) diff --git a/src/cmd/internal.go b/src/cmd/internal.go index 96078d5c70..5ead1c5d3d 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "os" - "time" "github.com/alecthomas/jsonschema" "github.com/defenseunicorns/pkg/helpers" @@ -163,7 +162,7 @@ var createReadOnlyGiteaUser = &cobra.Command{ Short: lang.CmdInternalCreateReadOnlyGiteaUserShort, Long: lang.CmdInternalCreateReadOnlyGiteaUserLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Load the state so we can get the credentials for the admin git user @@ -184,7 +183,7 @@ var createPackageRegistryToken = &cobra.Command{ Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Load the state so we can get the credentials for the admin git user @@ -213,7 +212,7 @@ var updateGiteaPVC = &cobra.Command{ Short: lang.CmdInternalUpdateGiteaPVCShort, Long: lang.CmdInternalUpdateGiteaPVCLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // There is a possibility that the pvc does not yet exist and Gitea helm chart should create it diff --git a/src/cmd/package.go b/src/cmd/package.go index 6378556621..0da391aece 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -110,7 +110,7 @@ var packageMirrorCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() if err := pkgClient.Mirror(ctx); err != nil { @@ -147,7 +147,7 @@ var packageListCmd = &cobra.Command{ Aliases: []string{"l", "ls"}, Short: lang.CmdPackageListShort, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Get all the deployed packages @@ -309,7 +309,7 @@ func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, return pkgCandidates, cobra.ShellCompDirectiveDefault } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Get all the deployed packages diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go index 7485754b7f..b78f961847 100644 --- a/src/cmd/tools/crane.go +++ b/src/cmd/tools/crane.go @@ -9,7 +9,6 @@ import ( "fmt" "os" "strings" - "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/zarf/src/cmd/common" @@ -125,7 +124,7 @@ func zarfCraneCatalog(cranePlatformOptions *[]crane.Option) *cobra.Command { return err } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := 
context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Load Zarf state @@ -177,7 +176,7 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command message.Note(lang.CmdToolsRegistryZarfState) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Load the state (if able) @@ -225,7 +224,7 @@ func pruneImages(_ *cobra.Command, _ []string) error { return err } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() // Load the state diff --git a/src/cmd/tools/helm/repo_add.go b/src/cmd/tools/helm/repo_add.go index d053689000..7e6b4a52ab 100644 --- a/src/cmd/tools/helm/repo_add.go +++ b/src/cmd/tools/helm/repo_add.go @@ -31,6 +31,7 @@ import ( "time" "github.com/defenseunicorns/pkg/helpers" + "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/gofrs/flock" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -128,7 +129,7 @@ func (o *repoAddOptions) run(out io.Writer) error { lockPath = o.repoFile + ".lock" } fileLock := flock.New(lockPath) - lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + lockCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() locked, err := fileLock.TryLockContext(lockCtx, time.Second) if err == nil && locked { diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go index 2efd582aea..ef18be1e5b 100644 --- a/src/cmd/tools/zarf.go +++ b/src/cmd/tools/zarf.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "os" - "time" "slices" @@ -53,7 +52,7 @@ var getCredsCmd = &cobra.Command{ Aliases: []string{"gc"}, Args: cobra.MaximumNArgs(1), Run: func(_ *cobra.Command, args []string) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) @@ -89,7 +88,7 @@ var updateCredsCmd = &cobra.Command{ } } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() c := cluster.NewClusterOrDie(ctx) diff --git a/src/internal/packager/helm/post-render.go b/src/internal/packager/helm/post-render.go index 169dc8f86d..ccb3446a97 100644 --- a/src/internal/packager/helm/post-render.go +++ b/src/internal/packager/helm/post-render.go @@ -11,11 +11,11 @@ import ( "os" "path/filepath" "reflect" - "time" "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/config" "github.com/defenseunicorns/zarf/src/internal/packager/template" + "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/defenseunicorns/zarf/src/pkg/utils" "github.com/defenseunicorns/zarf/src/types" @@ -96,7 +96,7 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { finalManifestsOutput := bytes.NewBuffer(nil) if r.cluster != nil { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() if err := r.editHelmResources(ctx, resources, finalManifestsOutput); err != nil { diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index 4739c2c2b4..ba61bf4f83 100644 --- 
a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -10,6 +10,7 @@ import ( "time" "github.com/defenseunicorns/pkg/helpers" + "github.com/defenseunicorns/zarf/src/pkg/cluster" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,7 +98,7 @@ func (k *K8s) GetPods(ctx context.Context, namespace string) (*corev1.PodList, e // It will wait up to 30 seconds for the pods to be found and will return a list of matching pod names // If the timeout is reached, an empty list will be returned. func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, include PodFilter) []corev1.Pod { - waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + waitCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) defer cancel() for { From db44603edc603a7243d461d5db3ca261ecf70f86 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Wed, 27 Mar 2024 10:37:37 -0500 Subject: [PATCH 12/62] Fix import cycle --- src/pkg/k8s/pods.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index ba61bf4f83..4739c2c2b4 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -10,7 +10,6 @@ import ( "time" "github.com/defenseunicorns/pkg/helpers" - "github.com/defenseunicorns/zarf/src/pkg/cluster" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -98,7 +97,7 @@ func (k *K8s) GetPods(ctx context.Context, namespace string) (*corev1.PodList, e // It will wait up to 30 seconds for the pods to be found and will return a list of matching pod names // If the timeout is reached, an empty list will be returned. func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, include PodFilter) []corev1.Pod { - waitCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) + waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() for { From f405049df1f1ac6aeff03784b0d963ad6fda93ad Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 11:31:46 -0500 Subject: [PATCH 13/62] Do not reuse context in destroy ops --- src/cmd/destroy.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index 643c9c0f93..b81bd59c11 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -31,21 +31,21 @@ var destroyCmd = &cobra.Command{ Short: lang.CmdDestroyShort, Long: lang.CmdDestroyLong, Run: func(_ *cobra.Command, _ []string) { - ctxLong, cancelLong := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancelLong() + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() - ctxShort, cancelShort := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancelShort() - - c, err := cluster.NewClusterWithWait(ctxShort) + c, err := cluster.NewClusterWithWait(ctx) if err != nil { message.Fatalf(err, lang.ErrNoClusterConnection) } + ctx, cancel = context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() + // NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig) // there will be no zarf-state to load and the struct will be empty. 
In these cases, if we can find // the scripts to remove k3s, we will still try to remove a locally installed k3s cluster - state, err := c.LoadZarfState(ctxShort) + state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } @@ -81,11 +81,19 @@ var destroyCmd = &cobra.Command{ // Perform chart uninstallation helm.Destroy(removeComponents) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + // If Zarf didn't deploy the cluster, only delete the ZarfNamespace - c.DeleteZarfNamespace(ctxLong) + if err := c.DeleteZarfNamespace(ctx); err != nil { + message.Fatal(err, err.Error()) + } + + ctx, cancel = context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() // Remove zarf agent labels and secrets from namespaces Zarf doesn't manage - c.StripZarfLabelsAndSecretsFromNamespaces(ctxLong) + c.StripZarfLabelsAndSecretsFromNamespaces(ctx) } }, } From ca072275ff439dd64f3010a5af5e57de9570d7a7 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 11:42:06 -0500 Subject: [PATCH 14/62] Do not reuse context in connect ops --- src/cmd/connect.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index de21c62fc6..aff77445b6 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -44,14 +44,15 @@ var ( spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error()) } - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() - var tunnel *k8s.Tunnel if connectResourceName != "" { zt := cluster.NewTunnelInfo(connectNamespace, connectResourceType, connectResourceName, "", connectLocalPort, connectRemotePort) + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() tunnel, err = c.ConnectTunnelInfo(ctx, zt) } else { + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() tunnel, err = c.Connect(ctx, target) } if err != nil { @@ -95,10 +96,11 @@ var ( Aliases: []string{"l"}, Short: lang.CmdConnectListShort, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() - err := cluster.NewClusterOrDie(ctx).PrintConnectTable(ctx) - if err != nil { + connectCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() + if err := cluster.NewClusterOrDie(clusterCtx).PrintConnectTable(connectCtx); err != nil { message.Fatal(err, err.Error()) } }, From 32ca85a678ebae39c7efea0acc2b24c7ab0b5944 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 11:49:13 -0500 Subject: [PATCH 15/62] Do not set timeout for DevDeploy and handle errors --- src/cmd/dev.go | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/cmd/dev.go b/src/cmd/dev.go index c535b1f69a..078b2d0f83 100644 --- a/src/cmd/dev.go +++ b/src/cmd/dev.go @@ -11,7 +11,6 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/pkg/helpers" @@ -56,10 +55,7 @@ var devDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer cancel() - - // Create the package + ctx := context.Background() if err := 
pkgClient.DevDeploy(ctx); err != nil { message.Fatalf(err, lang.CmdDevDeployErr, err.Error()) } @@ -297,8 +293,14 @@ func init() { // use the package create config for this and reset it here to avoid overwriting the config.CreateOptions.SetVariables devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.CreateOpts.SetVariables, "set", v.GetStringMapString(common.VPkgCreateSet), lang.CmdDevFlagSet) - devFindImagesCmd.Flags().MarkDeprecated("set", "this field is replaced by create-set") - devFindImagesCmd.Flags().MarkHidden("set") + err := devFindImagesCmd.Flags().MarkDeprecated("set", "this field is replaced by create-set") + if err != nil { + message.Fatal(err, err.Error()) + } + err = devFindImagesCmd.Flags().MarkHidden("set") + if err != nil { + message.Fatal(err, err.Error()) + } devFindImagesCmd.Flags().StringVarP(&pkgConfig.CreateOpts.Flavor, "flavor", "f", v.GetString(common.VPkgCreateFlavor), lang.CmdPackageCreateFlagFlavor) devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.CreateOpts.SetVariables, "create-set", v.GetStringMapString(common.VPkgCreateSet), lang.CmdDevFlagSet) devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.PkgOpts.SetVariables, "deploy-set", v.GetStringMapString(common.VPkgDeploySet), lang.CmdPackageDeployFlagSet) @@ -346,7 +348,16 @@ func bindDevGenerateFlags(_ *viper.Viper) { generateFlags.StringVar(&pkgConfig.GenerateOpts.Output, "output-directory", "", "Output directory for the generated zarf.yaml") generateFlags.StringVar(&pkgConfig.FindImagesOpts.KubeVersionOverride, "kube-version", "", lang.CmdDevFlagKubeVersion) - devGenerateCmd.MarkFlagRequired("url") - devGenerateCmd.MarkFlagRequired("version") - devGenerateCmd.MarkFlagRequired("output-directory") + err := devGenerateCmd.MarkFlagRequired("url") + if err != nil { + message.Fatal(err, err.Error()) + } + err = devGenerateCmd.MarkFlagRequired("version") + if err != nil { + message.Fatal(err, err.Error()) + } + err = devGenerateCmd.MarkFlagRequired("output-directory") + if err != nil { + message.Fatal(err, err.Error()) + } } From e7e2e99e7ef650cbb284d45a500ecacf53d6b670 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 11:53:59 -0500 Subject: [PATCH 16/62] Do not set timeout for init and remove comments --- src/cmd/dev.go | 6 +----- src/cmd/initialize.go | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/cmd/dev.go b/src/cmd/dev.go index 078b2d0f83..65560e8452 100644 --- a/src/cmd/dev.go +++ b/src/cmd/dev.go @@ -51,11 +51,11 @@ var devDeployCmd = &cobra.Command{ pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper) - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() ctx := context.Background() + if err := pkgClient.DevDeploy(ctx); err != nil { message.Fatalf(err, lang.CmdDevDeployErr, err.Error()) } @@ -210,19 +210,15 @@ var devFindImagesCmd = &cobra.Command{ Run: func(_ *cobra.Command, args []string) { pkgConfig.CreateOpts.BaseDir = common.SetBaseDirectory(args) - // Ensure uppercase keys from viper v := common.GetViper() pkgConfig.CreateOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgCreateSet), pkgConfig.CreateOpts.SetVariables, strings.ToUpper) pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper) - - // Configure the packager pkgClient := 
packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - // Find all the images the package might need if _, err := pkgClient.FindImages(); err != nil { message.Fatalf(err, lang.CmdDevFindImagesErr, err.Error()) } diff --git a/src/cmd/initialize.go b/src/cmd/initialize.go index 50cb6f61f8..f076e05536 100644 --- a/src/cmd/initialize.go +++ b/src/cmd/initialize.go @@ -12,7 +12,6 @@ import ( "path" "path/filepath" "strings" - "time" "github.com/AlecAivazis/survey/v2" "github.com/defenseunicorns/pkg/helpers" @@ -60,19 +59,15 @@ var initCmd = &cobra.Command{ message.Fatal(err, err.Error()) } - // Ensure uppercase keys from viper v := common.GetViper() pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper) - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer cancel() + ctx := context.Background() - // Deploy everything err = pkgClient.Deploy(ctx) if err != nil { message.Fatal(err, err.Error()) From d8e2a1fa74d6963e61dea21743baa8a4d018ff40 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 11:58:37 -0500 Subject: [PATCH 17/62] Do not set timeout for connect --- src/cmd/connect.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index aff77445b6..e3a22c8ad5 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -44,15 +44,13 @@ var ( spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error()) } + ctx := context.Background() + var tunnel *k8s.Tunnel if connectResourceName != "" { zt := cluster.NewTunnelInfo(connectNamespace, connectResourceType, connectResourceName, "", connectLocalPort, connectRemotePort) - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() tunnel, err = c.ConnectTunnelInfo(ctx, zt) } else { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() tunnel, err = c.Connect(ctx, target) } if err != nil { From 3395af90c3079836cd97597bf50a8ecda2d0871d Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 12:00:33 -0500 Subject: [PATCH 18/62] Do not set timeout for connect list --- src/cmd/connect.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index e3a22c8ad5..f0e78146d0 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -96,9 +96,8 @@ var ( Run: func(_ *cobra.Command, _ []string) { clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() - connectCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() - if err := cluster.NewClusterOrDie(clusterCtx).PrintConnectTable(connectCtx); err != nil { + ctx := context.Background() + if err := cluster.NewClusterOrDie(clusterCtx).PrintConnectTable(ctx); err != nil { message.Fatal(err, err.Error()) } }, From dee36c2d378fd535a098a387ac7d393dd0cce102 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 12:05:43 -0500 Subject: [PATCH 19/62] Do not set timeout for internal cmds and handle errors --- src/cmd/internal.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/cmd/internal.go b/src/cmd/internal.go index 47e94eeaf4..c30f592fb6 100644 --- a/src/cmd/internal.go +++ 
b/src/cmd/internal.go @@ -103,7 +103,7 @@ var genCLIDocs = &cobra.Command{ if toolCmd.Use == "monitor" { resetStringFlags(toolCmd) } - + if toolCmd.Use == "yq" { for _, subCmd := range toolCmd.Commands() { if subCmd.Name() == "shell-completion" { @@ -195,11 +195,12 @@ var createReadOnlyGiteaUser = &cobra.Command{ Short: lang.CmdInternalCreateReadOnlyGiteaUserShort, Long: lang.CmdInternalCreateReadOnlyGiteaUserLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() + ctx := context.Background() // Load the state so we can get the credentials for the admin git user - state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) + state, err := cluster.NewClusterOrDie(clusterCtx).LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } @@ -216,11 +217,12 @@ var createPackageRegistryToken = &cobra.Command{ Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() + ctx := context.Background() // Load the state so we can get the credentials for the admin git user - c := cluster.NewClusterOrDie(ctx) + c := cluster.NewClusterOrDie(clusterCtx) state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) @@ -235,7 +237,9 @@ var createPackageRegistryToken = &cobra.Command{ state.ArtifactServer.PushToken = token.Sha1 - c.SaveZarfState(ctx, state) + if err := c.SaveZarfState(ctx, state); err != nil { + message.Fatal(err, err.Error()) + } } }, } @@ -302,6 +306,9 @@ func addHiddenDummyFlag(cmd *cobra.Command, flagDummy string) { if cmd.PersistentFlags().Lookup(flagDummy) == nil { var dummyStr string cmd.PersistentFlags().StringVar(&dummyStr, flagDummy, "", "") - cmd.PersistentFlags().MarkHidden(flagDummy) + err := cmd.PersistentFlags().MarkHidden(flagDummy) + if err != nil { + message.Fatal(err, err.Error()) + } } } From 5c0b771defc6b7af320ba063e3c5296da40b3551 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 12:13:58 -0500 Subject: [PATCH 20/62] Do not set timeout for package cmds --- src/cmd/package.go | 58 +++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 34 deletions(-) diff --git a/src/cmd/package.go b/src/cmd/package.go index 0da391aece..9e8f7f27a1 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -10,7 +10,6 @@ import ( "path/filepath" "regexp" "strings" - "time" "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/config/lang" @@ -50,16 +49,13 @@ var packageCreateCmd = &cobra.Command{ config.CommonOptions.CachePath = config.ZarfDefaultCachePath } - // Ensure uppercase keys from viper v := common.GetViper() pkgConfig.CreateOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgCreateSet), pkgConfig.CreateOpts.SetVariables, strings.ToUpper) - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - // Create the package if err := pkgClient.Create(); err != nil { message.Fatalf(err, lang.CmdPackageCreateErr, err.Error()) } @@ -75,21 +71,15 @@ var packageDeployCmd = &cobra.Command{ Run: func(_ *cobra.Command, args []string) { 
pkgConfig.PkgOpts.PackageSource = choosePackage(args) - // Ensure uppercase keys from viper and CLI --set v := common.GetViper() - - // Merge the viper config file variables and provided CLI flag variables (CLI takes precedence)) pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap( v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper) - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer cancel() + ctx := context.Background() - // Deploy the package if err := pkgClient.Deploy(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) } @@ -106,12 +96,10 @@ var packageMirrorCmd = &cobra.Command{ Run: func(_ *cobra.Command, args []string) { pkgConfig.PkgOpts.PackageSource = choosePackage(args) - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := context.Background() if err := pkgClient.Mirror(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) @@ -130,11 +118,9 @@ var packageInspectCmd = &cobra.Command{ src := identifyAndFallbackToClusterSource() - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - // Inspect the package if err := pkgClient.Inspect(); err != nil { message.Fatalf(err, lang.CmdPackageInspectErr, err.Error()) } @@ -147,11 +133,11 @@ var packageListCmd = &cobra.Command{ Aliases: []string{"l", "ls"}, Short: lang.CmdPackageListShort, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() + ctx := context.Background() - // Get all the deployed packages - deployedZarfPackages, errs := cluster.NewClusterOrDie(ctx).GetDeployedZarfPackages(ctx) + deployedZarfPackages, errs := cluster.NewClusterOrDie(clusterCtx).GetDeployedZarfPackages(ctx) if len(errs) > 0 && len(deployedZarfPackages) == 0 { message.Fatalf(errs, lang.CmdPackageListNoPackageWarn) } @@ -171,7 +157,6 @@ var packageListCmd = &cobra.Command{ }) } - // Print out the table for the user header := []string{"Package", "Version", "Components"} message.Table(header, packageData) @@ -191,12 +176,11 @@ var packageRemoveCmd = &cobra.Command{ pkgConfig.PkgOpts.PackageSource = choosePackage(args) src := identifyAndFallbackToClusterSource() - // Configure the packager + pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() + ctx := context.Background() if err := pkgClient.Remove(ctx); err != nil { message.Fatalf(err, lang.CmdPackageRemoveErr, err.Error()) @@ -233,11 +217,9 @@ var packagePublishCmd = &cobra.Command{ pkgConfig.PublishOpts.PackageDestination = ref.String() - // Configure the packager pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - // Publish the package if err := pkgClient.Publish(); err != nil { message.Fatalf(err, lang.CmdPackagePublishErr, err.Error()) } @@ -252,11 +234,9 @@ var packagePullCmd = &cobra.Command{ Run: func(_ *cobra.Command, args []string) { pkgConfig.PkgOpts.PackageSource = args[0] - // Configure the packager 
pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - // Pull the package if err := pkgClient.Pull(); err != nil { message.Fatalf(err, lang.CmdPackagePullErr, err.Error()) } @@ -309,10 +289,8 @@ func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, return pkgCandidates, cobra.ShellCompDirectiveDefault } - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := context.Background() - // Get all the deployed packages deployedZarfPackages, _ := c.GetDeployedZarfPackages(ctx) // Populate list of package names for _, pkg := range deployedZarfPackages { @@ -382,9 +360,18 @@ func bindCreateFlags(v *viper.Viper) { createFlags.IntVar(&pkgConfig.PkgOpts.Retries, "retries", v.GetInt(common.VPkgRetries), lang.CmdPackageFlagRetries) - createFlags.MarkHidden("output-directory") - createFlags.MarkHidden("key") - createFlags.MarkHidden("key-pass") + err := createFlags.MarkHidden("output-directory") + if err != nil { + message.Fatal(err, err.Error()) + } + err = createFlags.MarkHidden("key") + if err != nil { + message.Fatal(err, err.Error()) + } + err = createFlags.MarkHidden("key-pass") + if err != nil { + message.Fatal(err, err.Error()) + } } func bindDeployFlags(v *viper.Viper) { @@ -404,7 +391,10 @@ func bindDeployFlags(v *viper.Viper) { deployFlags.StringVar(&pkgConfig.PkgOpts.Shasum, "shasum", v.GetString(common.VPkgDeployShasum), lang.CmdPackageDeployFlagShasum) deployFlags.StringVar(&pkgConfig.PkgOpts.SGetKeyPath, "sget", v.GetString(common.VPkgDeploySget), lang.CmdPackageDeployFlagSget) - deployFlags.MarkHidden("sget") + err := deployFlags.MarkHidden("sget") + if err != nil { + message.Fatal(err, err.Error()) + } } func bindMirrorFlags(v *viper.Viper) { From 0de4a3af2dc9505208958fe050f79754e64174c4 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 12:21:40 -0500 Subject: [PATCH 21/62] Handle errors --- src/cmd/tools/archiver.go | 5 ++++- src/cmd/tools/helm/load_plugins.go | 11 +++++++++-- src/cmd/tools/helm/repo_add.go | 7 ++++++- src/cmd/tools/helm/repo_index.go | 6 +++++- src/cmd/tools/helm/root.go | 6 +++++- 5 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/cmd/tools/archiver.go b/src/cmd/tools/archiver.go index 9a1ee1c022..344cc7398c 100644 --- a/src/cmd/tools/archiver.go +++ b/src/cmd/tools/archiver.go @@ -93,5 +93,8 @@ func init() { archiverDecompressCmd.Flags().BoolVar(&unarchiveAll, "decompress-all", false, "Decompress all tarballs in the archive") archiverDecompressCmd.Flags().BoolVar(&unarchiveAll, "unarchive-all", false, "Unarchive all tarballs in the archive") archiverDecompressCmd.MarkFlagsMutuallyExclusive("decompress-all", "unarchive-all") - archiverDecompressCmd.Flags().MarkHidden("decompress-all") + err := archiverDecompressCmd.Flags().MarkHidden("decompress-all") + if err != nil { + message.Fatal(err, err.Error()) + } } diff --git a/src/cmd/tools/helm/load_plugins.go b/src/cmd/tools/helm/load_plugins.go index 28ea155030..f4d2800137 100644 --- a/src/cmd/tools/helm/load_plugins.go +++ b/src/cmd/tools/helm/load_plugins.go @@ -32,6 +32,7 @@ import ( "strings" "syscall" + "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/pkg/errors" "github.com/spf13/cobra" "sigs.k8s.io/yaml" @@ -216,7 +217,10 @@ func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) { if err != nil { // The file could be missing or invalid. No static completion for this plugin. 
if settings.Debug { - log.Output(2, fmt.Sprintf("[info] %s\n", err.Error())) + err := log.Output(2, fmt.Sprintf("[info] %s\n", err.Error())) + if err != nil { + message.Fatal(err, err.Error()) + } } // Continue to setup dynamic completion. cmds = &pluginCommand{} @@ -238,7 +242,10 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug if len(cmds.Name) == 0 { // Missing name for a command if settings.Debug { - log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath())) + err := log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath())) + if err != nil { + message.Fatal(err, err.Error()) + } } return } diff --git a/src/cmd/tools/helm/repo_add.go b/src/cmd/tools/helm/repo_add.go index 7e6b4a52ab..7854f56d50 100644 --- a/src/cmd/tools/helm/repo_add.go +++ b/src/cmd/tools/helm/repo_add.go @@ -32,6 +32,7 @@ import ( "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/pkg/cluster" + "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/gofrs/flock" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -133,7 +134,11 @@ func (o *repoAddOptions) run(out io.Writer) error { defer cancel() locked, err := fileLock.TryLockContext(lockCtx, time.Second) if err == nil && locked { - defer fileLock.Unlock() + defer func() { + if err := fileLock.Unlock(); err != nil { + message.Fatal(err, err.Error()) + } + }() } if err != nil { return err diff --git a/src/cmd/tools/helm/repo_index.go b/src/cmd/tools/helm/repo_index.go index a84a3af74c..1d6182e85e 100644 --- a/src/cmd/tools/helm/repo_index.go +++ b/src/cmd/tools/helm/repo_index.go @@ -27,6 +27,7 @@ import ( "path/filepath" "github.com/defenseunicorns/pkg/helpers" + "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -101,7 +102,10 @@ func index(dir, url, mergeTo string) error { var i2 *repo.IndexFile if _, err := os.Stat(mergeTo); os.IsNotExist(err) { i2 = repo.NewIndexFile() - i2.WriteFile(mergeTo, helpers.ReadAllWriteUser) + err := i2.WriteFile(mergeTo, helpers.ReadAllWriteUser) + if err != nil { + message.Fatal(err, err.Error()) + } } else { i2, err = repo.LoadIndexFile(mergeTo) if err != nil { diff --git a/src/cmd/tools/helm/root.go b/src/cmd/tools/helm/root.go index e496a93cec..b2754b64e2 100644 --- a/src/cmd/tools/helm/root.go +++ b/src/cmd/tools/helm/root.go @@ -29,6 +29,7 @@ import ( "os" "strings" + "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -156,7 +157,10 @@ func NewRootCmd(actionConfig *action.Configuration, out io.Writer, args []string // This call is required to gather configuration information prior to // execution. 
flags.ParseErrorsWhitelist.UnknownFlags = true - flags.Parse(args) + err = flags.Parse(args) + if err != nil { + message.Fatal(err, err.Error()) + } registryClient, err := newDefaultRegistryClient() if err != nil { From 3627ec03cd52ee87815e86a732806705cd939dab Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 12:31:56 -0500 Subject: [PATCH 22/62] Attempt to make logical improvements to WaitForPodsAndContainers --- src/pkg/k8s/pods.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index d2aa495625..524a9817ec 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -107,7 +107,7 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in }) if err != nil { k.Log("Unable to find matching pods: %w", err) - break + return nil } k.Log("Found %d pods for target %#v", len(pods.Items), target) @@ -130,37 +130,41 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in // Handle container targeting if target.Container != "" { k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchesInitContainer bool + var matchedContainer bool // Check the status of initContainers for a running match for _, initContainer := range pod.Status.InitContainerStatuses { isRunning := initContainer.State.Running != nil if isRunning && initContainer.Name == target.Container { // On running match in initContainer break this loop - matchesInitContainer = true + matchedContainer = true readyPods = append(readyPods, pod) break } } - - // Don't check any further if there's already a match - if matchesInitContainer { - continue + if matchedContainer { + break } // Check the status of regular containers for a running match for _, container := range pod.Status.ContainerStatuses { isRunning := container.State.Running != nil if isRunning && container.Name == target.Container { + matchedContainer = true readyPods = append(readyPods, pod) + break } } + if matchedContainer { + break + } } else { status := pod.Status.Phase k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) // Regular status checking without a container if status == corev1.PodRunning { readyPods = append(readyPods, pod) + break } } } From 15c3078aeca3b130dd7522d50390db09a3cb428d Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 13:29:22 -0500 Subject: [PATCH 23/62] Fix TestConfigFile test --- .pre-commit-config.yaml | 1 + src/cmd/tools/helm/root.go | 6 +----- src/test/e2e/30_config_file_test.go | 8 +++++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b3bcc66b4..818883595a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,6 +9,7 @@ repos: args: - "--allow-missing-credentials" - id: detect-private-key + exclude: "src/test/e2e/30_config_file_test.go" - id: end-of-file-fixer - id: fix-byte-order-marker - id: trailing-whitespace diff --git a/src/cmd/tools/helm/root.go b/src/cmd/tools/helm/root.go index b2754b64e2..8f18a91827 100644 --- a/src/cmd/tools/helm/root.go +++ b/src/cmd/tools/helm/root.go @@ -29,7 +29,6 @@ import ( "os" "strings" - "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -157,10 +156,7 @@ func NewRootCmd(actionConfig *action.Configuration, out io.Writer, args []string // This call is required to gather configuration information prior to // execution. 
flags.ParseErrorsWhitelist.UnknownFlags = true - err = flags.Parse(args) - if err != nil { - message.Fatal(err, err.Error()) - } + _ = flags.Parse(args) registryClient, err := newDefaultRegistryClient() if err != nil { diff --git a/src/test/e2e/30_config_file_test.go b/src/test/e2e/30_config_file_test.go index a03844fa5a..31425de829 100644 --- a/src/test/e2e/30_config_file_test.go +++ b/src/test/e2e/30_config_file_test.go @@ -27,8 +27,8 @@ func TestConfigFile(t *testing.T) { // Test the config file environment variable os.Setenv("ZARF_CONFIG", filepath.Join(dir, config)) + defer os.Unsetenv("ZARF_CONFIG") configFileTests(t, dir, path) - os.Unsetenv("ZARF_CONFIG") configFileDefaultTests(t) @@ -39,6 +39,8 @@ func TestConfigFile(t *testing.T) { } func configFileTests(t *testing.T, dir, path string) { + t.Helper() + _, stdErr, err := e2e.Zarf("package", "create", dir, "--confirm") require.NoError(t, err) require.Contains(t, string(stdErr), "This is a zebra and they have stripes") @@ -94,6 +96,7 @@ H4RxbE+FpmsMAUCpdrzvFkc= } func configFileDefaultTests(t *testing.T) { + t.Helper() globalFlags := []string{ "architecture: 509a38f0", @@ -137,6 +140,7 @@ func configFileDefaultTests(t *testing.T) { // Test remaining default initializers os.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml")) + defer os.Unsetenv("ZARF_CONFIG") // Test global flags stdOut, _, _ := e2e.Zarf("--help") @@ -161,6 +165,4 @@ func configFileDefaultTests(t *testing.T) { for _, test := range packageDeployFlags { require.Contains(t, string(stdOut), test) } - - os.Unsetenv("ZARF_CONFIG") } From 55cccf8de55a1ce04ce9ec9dba6e325ee560e0f8 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 13:33:09 -0500 Subject: [PATCH 24/62] Do not set timeout for destroy ops --- src/cmd/destroy.go | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index b81bd59c11..9779a80f6b 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -9,7 +9,6 @@ import ( "errors" "os" "regexp" - "time" "github.com/defenseunicorns/pkg/helpers" "github.com/defenseunicorns/zarf/src/config" @@ -31,16 +30,15 @@ var destroyCmd = &cobra.Command{ Short: lang.CmdDestroyShort, Long: lang.CmdDestroyLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) defer cancel() - c, err := cluster.NewClusterWithWait(ctx) + c, err := cluster.NewClusterWithWait(clusterCtx) if err != nil { message.Fatalf(err, lang.ErrNoClusterConnection) } - ctx, cancel = context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := context.Background() // NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig) // there will be no zarf-state to load and the struct will be empty. 
In these cases, if we can find
@@ -81,17 +79,11 @@ var destroyCmd = &cobra.Command{
 		// Perform chart uninstallation
 		helm.Destroy(removeComponents)
 
-		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-		defer cancel()
-
 		// If Zarf didn't deploy the cluster, only delete the ZarfNamespace
 		if err := c.DeleteZarfNamespace(ctx); err != nil {
 			message.Fatal(err, err.Error())
 		}
 
-		ctx, cancel = context.WithTimeout(context.Background(), cluster.DefaultTimeout)
-		defer cancel()
-
 		// Remove zarf agent labels and secrets from namespaces Zarf doesn't manage
 		c.StripZarfLabelsAndSecretsFromNamespaces(ctx)
 	}

From 4f62bef14d22e94126f23a4d7e5fe4f293b8126b Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Thu, 18 Apr 2024 14:22:21 -0500
Subject: [PATCH 25/62] Do not set timeout for crane ops

---
 src/cmd/tools/crane.go | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go
index b78f961847..c1f98a22fc 100644
--- a/src/cmd/tools/crane.go
+++ b/src/cmd/tools/crane.go
@@ -124,10 +124,8 @@ func zarfCraneCatalog(cranePlatformOptions *[]crane.Option) *cobra.Command {
 			return err
 		}
 
-		ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout)
-		defer cancel()
+		ctx := context.Background()
 
-		// Load Zarf state
 		zarfState, err := c.LoadZarfState(ctx)
 		if err != nil {
 			return err
 		}
@@ -176,10 +174,8 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command
 		message.Note(lang.CmdToolsRegistryZarfState)
 
-		ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout)
-		defer cancel()
+		ctx := context.Background()
 
-		// Load the state (if able)
 		zarfState, err := c.LoadZarfState(ctx)
 		if err != nil {
 			message.Warnf(lang.CmdToolsCraneConnectedButBadStateErr, err.Error())

From c9cdd0d77a05ec9947b78b50290dba1f349a58aa Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Thu, 18 Apr 2024 14:36:52 -0500
Subject: [PATCH 26/62] Move NewClusterOrDie to src/cmd/common

---
 src/cmd/common/utils.go                | 13 +++++++++++++
 src/cmd/connect.go                     |  4 +---
 src/cmd/internal.go                    | 10 ++--------
 src/cmd/package.go                     |  5 +----
 src/cmd/tools/zarf.go                  | 13 ++++---------
 src/pkg/cluster/common.go              | 10 ----------
 src/test/e2e/00_use_cli_test.go        |  8 ++++----
 src/test/e2e/22_git_and_gitops_test.go |  5 +++--
 8 files changed, 28 insertions(+), 40 deletions(-)

diff --git a/src/cmd/common/utils.go b/src/cmd/common/utils.go
index 54113ee90f..9bf23d4a97 100644
--- a/src/cmd/common/utils.go
+++ b/src/cmd/common/utils.go
@@ -5,11 +5,13 @@ package common
 import (
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
 
 	"github.com/defenseunicorns/zarf/src/config/lang"
+	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 )
 
@@ -35,3 +37,14 @@ func ExitOnInterrupt() {
 		}
 	}()
 }
+
+// NewClusterOrDie creates a new Cluster instance and waits for the cluster to be ready or throws a fatal error.
+func NewClusterOrDie() *cluster.Cluster { + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() + c, err := cluster.NewClusterWithWait(ctx) + if err != nil { + message.Fatalf(err, "Failed to connect to cluster") + } + return c +} diff --git a/src/cmd/connect.go b/src/cmd/connect.go index f0e78146d0..0fbfc9d961 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -94,10 +94,8 @@ var ( Aliases: []string{"l"}, Short: lang.CmdConnectListShort, Run: func(_ *cobra.Command, _ []string) { - clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() ctx := context.Background() - if err := cluster.NewClusterOrDie(clusterCtx).PrintConnectTable(ctx); err != nil { + if err := common.NewClusterOrDie().PrintConnectTable(ctx); err != nil { message.Fatal(err, err.Error()) } }, diff --git a/src/cmd/internal.go b/src/cmd/internal.go index c30f592fb6..db32d899ff 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -195,12 +195,10 @@ var createReadOnlyGiteaUser = &cobra.Command{ Short: lang.CmdInternalCreateReadOnlyGiteaUserShort, Long: lang.CmdInternalCreateReadOnlyGiteaUserLong, Run: func(_ *cobra.Command, _ []string) { - clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() ctx := context.Background() // Load the state so we can get the credentials for the admin git user - state, err := cluster.NewClusterOrDie(clusterCtx).LoadZarfState(ctx) + state, err := common.NewClusterOrDie().LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } @@ -217,12 +215,8 @@ var createPackageRegistryToken = &cobra.Command{ Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, Run: func(_ *cobra.Command, _ []string) { - clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + c := common.NewClusterOrDie() ctx := context.Background() - - // Load the state so we can get the credentials for the admin git user - c := cluster.NewClusterOrDie(clusterCtx) state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) diff --git a/src/cmd/package.go b/src/cmd/package.go index 9e8f7f27a1..3b84252d82 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -133,11 +133,8 @@ var packageListCmd = &cobra.Command{ Aliases: []string{"l", "ls"}, Short: lang.CmdPackageListShort, Run: func(_ *cobra.Command, _ []string) { - clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() ctx := context.Background() - - deployedZarfPackages, errs := cluster.NewClusterOrDie(clusterCtx).GetDeployedZarfPackages(ctx) + deployedZarfPackages, errs := common.NewClusterOrDie().GetDeployedZarfPackages(ctx) if len(errs) > 0 && len(deployedZarfPackages) == 0 { message.Fatalf(errs, lang.CmdPackageListNoPackageWarn) } diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go index 91461f7317..d04f487488 100644 --- a/src/cmd/tools/zarf.go +++ b/src/cmd/tools/zarf.go @@ -19,7 +19,6 @@ import ( "github.com/defenseunicorns/zarf/src/config/lang" "github.com/defenseunicorns/zarf/src/internal/packager/git" "github.com/defenseunicorns/zarf/src/internal/packager/helm" - "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/defenseunicorns/zarf/src/pkg/packager/sources" "github.com/defenseunicorns/zarf/src/pkg/pki" @@ -52,10 +51,8 @@ var getCredsCmd = 
&cobra.Command{ Aliases: []string{"gc"}, Args: cobra.MaximumNArgs(1), Run: func(_ *cobra.Command, args []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() - - state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) + ctx := context.Background() + state, err := common.NewClusterOrDie().LoadZarfState(ctx) if err != nil || state.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -88,10 +85,8 @@ var updateCredsCmd = &cobra.Command{ } } - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() - - c := cluster.NewClusterOrDie(ctx) + c := common.NewClusterOrDie() + ctx := context.Background() oldState, err := c.LoadZarfState(ctx) if err != nil || oldState.Distro == "" { // If no distro the zarf secret did not load properly diff --git a/src/pkg/cluster/common.go b/src/pkg/cluster/common.go index 895124bf6b..8b2153999b 100644 --- a/src/pkg/cluster/common.go +++ b/src/pkg/cluster/common.go @@ -28,16 +28,6 @@ var labels = k8s.Labels{ config.ZarfManagedByLabel: "zarf", } -// NewClusterOrDie creates a new Cluster instance and waits for the cluster to be ready or throws a fatal error. -func NewClusterOrDie(ctx context.Context) *Cluster { - c, err := NewClusterWithWait(ctx) - if err != nil { - message.Fatalf(err, "Failed to connect to cluster") - } - - return c -} - // NewClusterWithWait creates a new Cluster instance and waits for the given timeout for the cluster to be ready. func NewClusterWithWait(ctx context.Context) (*Cluster, error) { spinner := message.NewProgressSpinner("Waiting for cluster connection") diff --git a/src/test/e2e/00_use_cli_test.go b/src/test/e2e/00_use_cli_test.go index eec380619e..afc556e2c3 100644 --- a/src/test/e2e/00_use_cli_test.go +++ b/src/test/e2e/00_use_cli_test.go @@ -222,15 +222,15 @@ func TestUseCLI(t *testing.T) { // Test that yq can eval properly _, stdErr, err := e2e.Zarf("tools", "yq", "eval", "-i", `.items[1].name = "renamed-item"`, file) require.NoError(t, err, stdErr) - stdOut, stdErr, err := e2e.Zarf("tools", "yq", ".items[1].name", file) + stdOut, _, err := e2e.Zarf("tools", "yq", ".items[1].name", file) + require.NoError(t, err) require.Contains(t, stdOut, "renamed-item") // Test that yq ea can be used properly _, stdErr, err = e2e.Zarf("tools", "yq", "eval-all", "-i", `. 
as $doc ireduce ({}; .items += $doc.items)`, file, otherFile) require.NoError(t, err, stdErr) - stdOut, stdErr, err = e2e.Zarf("tools", "yq", "e", ".items | length", file) + stdOut, _, err = e2e.Zarf("tools", "yq", "e", ".items | length", file) + require.NoError(t, err) require.Equal(t, "4\n", stdOut) - }) } - diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index 1105d4f716..cb657d48cf 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -13,6 +13,7 @@ import ( "path/filepath" "testing" + "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/internal/packager/git" "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/defenseunicorns/zarf/src/types" @@ -69,7 +70,7 @@ func testGitServerConnect(t *testing.T, gitURL string) { func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) + state, err := common.NewClusterOrDie().LoadZarfState(ctx) require.NoError(t, err) gitCfg := git.New(state.GitServer) @@ -92,7 +93,7 @@ func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { func testGitServerTagAndHash(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie(ctx).LoadZarfState(ctx) + state, err := common.NewClusterOrDie().LoadZarfState(ctx) require.NoError(t, err, "Failed to load Zarf state") repoName := "zarf-public-test-2469062884" From af66e3541e2074c99d6c9b18f4ca8c6d422e7ec1 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 14:39:16 -0500 Subject: [PATCH 27/62] Do not set timeout for prune images --- src/cmd/tools/crane.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go index c1f98a22fc..d4fde65413 100644 --- a/src/cmd/tools/crane.go +++ b/src/cmd/tools/crane.go @@ -220,16 +220,13 @@ func pruneImages(_ *cobra.Command, _ []string) error { return err } - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := context.Background() - // Load the state zarfState, err := c.LoadZarfState(ctx) if err != nil { return err } - // Load the currently deployed packages zarfPackages, errs := c.GetDeployedZarfPackages(ctx) if len(errs) > 0 { return lang.ErrUnableToGetPackages From a7fc6620aae427c113c3d575e7214d2d5d80b471 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 18 Apr 2024 14:41:27 -0500 Subject: [PATCH 28/62] Do not set timeout for update-gitea-pvc --- src/cmd/internal.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/cmd/internal.go b/src/cmd/internal.go index db32d899ff..1624648b99 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -18,7 +18,6 @@ import ( "github.com/defenseunicorns/zarf/src/config/lang" "github.com/defenseunicorns/zarf/src/internal/agent" "github.com/defenseunicorns/zarf/src/internal/packager/git" - "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/defenseunicorns/zarf/src/pkg/message" "github.com/defenseunicorns/zarf/src/types" "github.com/spf13/cobra" @@ -243,8 +242,7 @@ var updateGiteaPVC = &cobra.Command{ Short: lang.CmdInternalUpdateGiteaPVCShort, Long: lang.CmdInternalUpdateGiteaPVCLong, Run: func(_ *cobra.Command, _ []string) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := 
context.Background()
 		// There is a possibility that the pvc does not yet exist and Gitea helm chart should create it
 		helmShouldCreate, err := git.UpdateGiteaPVC(ctx, rollback)

From f32398fa0868e1d1446ef45aa6ff51c7030cddcc Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Thu, 18 Apr 2024 15:49:26 -0500
Subject: [PATCH 29/62] Use NewClusterOrDie in destroy cmd

---
 src/cmd/destroy.go | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go
index 9779a80f6b..d9eeb3df40 100644
--- a/src/cmd/destroy.go
+++ b/src/cmd/destroy.go
@@ -11,10 +11,10 @@ import (
 	"regexp"
 
 	"github.com/defenseunicorns/pkg/helpers"
+	"github.com/defenseunicorns/zarf/src/cmd/common"
 	"github.com/defenseunicorns/zarf/src/config"
 	"github.com/defenseunicorns/zarf/src/config/lang"
 	"github.com/defenseunicorns/zarf/src/internal/packager/helm"
-	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/pkg/utils/exec"
@@ -30,14 +30,7 @@ var destroyCmd = &cobra.Command{
 	Short: lang.CmdDestroyShort,
 	Long:  lang.CmdDestroyLong,
 	Run: func(_ *cobra.Command, _ []string) {
-		clusterCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout)
-		defer cancel()
-
-		c, err := cluster.NewClusterWithWait(clusterCtx)
-		if err != nil {
-			message.Fatalf(err, lang.ErrNoClusterConnection)
-		}
-
+		c := common.NewClusterOrDie()
 		ctx := context.Background()
 
 		// NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig)

From ed89d81e571f3a66367fa8031a27e2008b2d3308 Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Thu, 18 Apr 2024 15:59:36 -0500
Subject: [PATCH 30/62] Do not set timeout for helm post-render

---
 src/internal/packager/helm/post-render.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/internal/packager/helm/post-render.go b/src/internal/packager/helm/post-render.go
index ccb3446a97..ce69f57df6 100644
--- a/src/internal/packager/helm/post-render.go
+++ b/src/internal/packager/helm/post-render.go
@@ -15,7 +15,6 @@ import (
 	"github.com/defenseunicorns/pkg/helpers"
 	"github.com/defenseunicorns/zarf/src/config"
 	"github.com/defenseunicorns/zarf/src/internal/packager/template"
-	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/pkg/utils"
 	"github.com/defenseunicorns/zarf/src/types"
@@ -96,8 +95,7 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
 	finalManifestsOutput := bytes.NewBuffer(nil)
 
 	if r.cluster != nil {
-		ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout)
-		defer cancel()
+		ctx := context.Background()
 
 		if err := r.editHelmResources(ctx, resources, finalManifestsOutput); err != nil {
 			return nil, err

From 7e8cc18f47f5547e3ba30682c36cea47d620b5fe Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Fri, 19 Apr 2024 11:58:05 -0500
Subject: [PATCH 31/62] Do not wait before checking for ns in DeleteNamespace()

---
 src/pkg/k8s/namespace.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/pkg/k8s/namespace.go b/src/pkg/k8s/namespace.go
index 4d0c62472b..db54fcbc5c 100644
--- a/src/pkg/k8s/namespace.go
+++ b/src/pkg/k8s/namespace.go
@@ -50,14 +50,14 @@ func (k *K8s) DeleteNamespace(ctx context.Context, name string) error {
 	}
 
 	for {
+		_, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
+		if errors.IsNotFound(err) {
+			return nil
+		}
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
 		case <-time.After(1 * time.Second):
 		}
 	}
 }

From 75dc0483208631c53542020ea99605f228430b89 Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Fri, 19 Apr 2024 12:10:04 -0500
Subject: [PATCH 32/62] Do not wait before finding an image for injector pod

---
 src/pkg/cluster/injector.go | 87 +++++++++++++++++++------------------
 1 file changed, 45 insertions(+), 42 deletions(-)

diff --git a/src/pkg/cluster/injector.go b/src/pkg/cluster/injector.go
index fd127ed945..2fb721cb0b 100644
--- a/src/pkg/cluster/injector.go
+++ b/src/pkg/cluster/injector.go
@@ -164,28 +164,30 @@ func (c *Cluster) loadSeedImages(imagesDir, seedImagesDir string, injectorSeedSr
 		spinner.Updatef("Loading the seed image '%s' from the package", src)
 		ref, err := transform.ParseImageRef(src)
 		if err != nil {
-			return seedImages, fmt.Errorf("failed to create ref for image %s: %w", src, err)
+			return nil, fmt.Errorf("failed to create ref for image %s: %w", src, err)
 		}
 		img, err := utils.LoadOCIImage(imagesDir, ref)
 		if err != nil {
-			return seedImages, err
+			return nil, err
 		}
 
-		crane.SaveOCI(img, seedImagesDir)
+		if err := crane.SaveOCI(img, seedImagesDir); err != nil {
+			return nil, err
+		}
 
 		seedImages = append(seedImages, ref)
 
 		// Get the image digest so we can set an annotation in the image.json later
 		imgDigest, err := img.Digest()
 		if err != nil {
-			return seedImages, err
+			return nil, err
 		}
 		// This is done _without_ the domain (different from pull.go) since the injector only handles local images
 		localReferenceToDigest[ref.Path+ref.TagOrDigest] = imgDigest.String()
 	}
 
 	if err := utils.AddImageNameAnnotation(seedImagesDir, localReferenceToDigest); err != nil {
-		return seedImages, fmt.Errorf("unable to format OCI layout: %w", err)
+		return nil, fmt.Errorf("unable to format OCI layout: %w", err)
 	}
 
 	return seedImages, nil
@@ -434,57 +436,58 @@ func (c *Cluster) buildInjectionPod(node, image string, payloadConfigmaps []stri
 	return pod, nil
 }
 
-// getImagesFromAvailableNodes checks for images on schedulable nodes within a cluster and returns
+// getImagesAndNodesForInjection checks for images on schedulable nodes within a cluster.
func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeMap, error) { result := make(imageNodeMap) for { - select { - case <-ctx.Done(): - return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) - case <-time.After(2 * time.Second): - pods, err := c.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fmt.Sprintf("status.phase=%s", corev1.PodRunning), - }) + pods, err := c.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("status.phase=%s", corev1.PodRunning), + }) + if err != nil { + return nil, fmt.Errorf("unable to get the list of %q pods in the cluster: %w", corev1.PodRunning, err) + } + + for _, pod := range pods.Items { + nodeName := pod.Spec.NodeName + + nodeDetails, err := c.GetNode(ctx, nodeName) if err != nil { - return nil, fmt.Errorf("unable to get the list of %q pods in the cluster: %w", corev1.PodRunning, err) + return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) } - for _, pod := range pods.Items { - nodeName := pod.Spec.NodeName - - nodeDetails, err := c.GetNode(ctx, nodeName) - if err != nil { - return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) - } + if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || + nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { + continue + } - if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || - nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { + for _, taint := range nodeDetails.Spec.Taints { + if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { continue } - - for _, taint := range nodeDetails.Spec.Taints { - if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { - continue - } - } - - for _, container := range pod.Spec.InitContainers { - result[container.Image] = append(result[container.Image], nodeName) - } - for _, container := range pod.Spec.Containers { - result[container.Image] = append(result[container.Image], nodeName) - } - for _, container := range pod.Spec.EphemeralContainers { - result[container.Image] = append(result[container.Image], nodeName) - } } - if len(result) > 0 { - return result, nil + for _, container := range pod.Spec.InitContainers { + result[container.Image] = append(result[container.Image], nodeName) } + for _, container := range pod.Spec.Containers { + result[container.Image] = append(result[container.Image], nodeName) + } + for _, container := range pod.Spec.EphemeralContainers { + result[container.Image] = append(result[container.Image], nodeName) + } + } - c.Log("No images found on any node. Retrying...") + if len(result) > 0 { + return result, nil + } + + c.Log("No images found on any node. 
Retrying...") + + select { + case <-ctx.Done(): + return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) + case <-time.After(2 * time.Second): } } } From 8af2c43f1d21b59e0ea915ee802ecea58bdf0853 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 12:29:21 -0500 Subject: [PATCH 33/62] Do not wait before checking pkg secret in RecordPackageDeploymentAndWait() --- src/pkg/cluster/zarf.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/pkg/cluster/zarf.go b/src/pkg/cluster/zarf.go index 1a90be06ab..a37c4f5759 100644 --- a/src/pkg/cluster/zarf.go +++ b/src/pkg/cluster/zarf.go @@ -133,7 +133,7 @@ func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types. packageNeedsWait, waitSeconds, hookName := c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) // If no webhooks need to complete, we can return immediately. if !packageNeedsWait { - return nil, nil + return deployedPackage, nil } waitDuration := types.DefaultWebhookWaitDuration @@ -148,19 +148,21 @@ func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types. defer spinner.Stop() for { + deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) + if err != nil { + return nil, err + } + + packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) + if !packageNeedsWait { + spinner.Success() + return deployedPackage, nil + } + select { case <-waitCtx.Done(): - return nil, fmt.Errorf("timed out waiting for package deployment to complete: %w", waitCtx.Err()) + return nil, fmt.Errorf("timed out waiting for webhook %q to complete for component %q: %w", hookName, component.Name, waitCtx.Err()) case <-time.After(1 * time.Second): - deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) - if err != nil { - return nil, err - } - packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) - if !packageNeedsWait { - spinner.Success() - return deployedPackage, nil - } } } } From d3b5e3d73305a19066c5c9517660868d07537720 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 12:43:38 -0500 Subject: [PATCH 34/62] Do not wait before checking for healthy cluster --- src/pkg/k8s/common.go | 61 ++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index 987c1c238a..19731f470c 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -50,42 +50,43 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { var pods *v1.PodList for { - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) - case <-time.After(1 * time.Second): - if k.RestConfig == nil || k.Clientset == nil { - config, clientset, err := connect() - if err != nil { - k.Log("Cluster connection not available yet: %w", err) - continue - } - - k.RestConfig = config - k.Clientset = clientset - } - - // Make sure there is at least one running Node - nodes, err = k.GetNodes(ctx) - if err != nil || len(nodes.Items) < 1 { - k.Log("No nodes reporting healthy yet: %#v\n", err) + if k.RestConfig == nil || k.Clientset == nil { + config, clientset, err := connect() + if err != nil { + k.Log("Cluster connection not available yet: %w", err) continue } - // Get the cluster pod list - if pods, err = k.GetAllPods(ctx); err != nil { - k.Log("Could not get the pod list: %w", err) - continue - } + k.RestConfig 
= config + k.Clientset = clientset + } - // Check that at least one pod is in the 'succeeded' or 'running' state - for _, pod := range pods.Items { - if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { - return nil - } + // Make sure there is at least one running Node + nodes, err = k.GetNodes(ctx) + if err != nil || len(nodes.Items) < 1 { + k.Log("No nodes reporting healthy yet: %#v\n", err) + continue + } + + // Get the cluster pod list + if pods, err = k.GetAllPods(ctx); err != nil { + k.Log("Could not get the pod list: %w", err) + continue + } + + // Check that at least one pod is in the 'succeeded' or 'running' state + for _, pod := range pods.Items { + if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { + return nil } + } + + k.Log("No pods reported 'succeeded' or 'running' state yet.") - k.Log("No pods reported 'succeeded' or 'running' state yet.") + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) + case <-time.After(1 * time.Second): } } } From de5df64afe823aabe457412842e3d83869f98c25 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 12:46:37 -0500 Subject: [PATCH 35/62] Do not wait before checking for deleted pod --- src/pkg/k8s/pods.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index 524a9817ec..41b236f7b1 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -45,14 +45,15 @@ func (k *K8s) DeletePod(ctx context.Context, namespace string, name string) erro } for { + _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + select { case <-ctx.Done(): return ctx.Err() case <-time.After(1 * time.Second): - _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } } } } From d5eef95b047eb1800a1a6dafd1a12c98c04b8510 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 12:55:29 -0500 Subject: [PATCH 36/62] Do not wait before checking for matching containers --- src/pkg/k8s/pods.go | 123 ++++++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 61 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index 41b236f7b1..daea645eca 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -91,87 +91,88 @@ func (k *K8s) GetPods(ctx context.Context, namespace string, listOpts metav1.Lis } // WaitForPodsAndContainers attempts to find pods matching the given selector and optional inclusion filter -// It will wait up to 30 seconds for the pods to be found and will return a list of matching pod names +// It will wait up to 90 seconds for the pods to be found and will return a list of matching pod names // If the timeout is reached, an empty list will be returned. 
func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, include PodFilter) []corev1.Pod { - waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + waitCtx, cancel := context.WithTimeout(ctx, 90*time.Second) defer cancel() for { - select { - case <-waitCtx.Done(): - k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) + pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ + LabelSelector: target.Selector, + }) + if err != nil { + k.Log("Unable to find matching pods: %w", err) return nil - case <-time.After(3 * time.Second): - pods, err := k.Clientset.CoreV1().Pods(target.Namespace).List(ctx, metav1.ListOptions{ - LabelSelector: target.Selector, - }) - if err != nil { - k.Log("Unable to find matching pods: %w", err) - return nil - } + } - k.Log("Found %d pods for target %#v", len(pods.Items), target) + k.Log("Found %d pods for target %#v", len(pods.Items), target) - var readyPods = []corev1.Pod{} + var readyPods = []corev1.Pod{} - // Sort the pods from newest to oldest - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) - }) + // Sort the pods from newest to oldest + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) - for _, pod := range pods.Items { - k.Log("Testing pod %q", pod.Name) + for _, pod := range pods.Items { + k.Log("Testing pod %q", pod.Name) - // If an include function is provided, only keep pods that return true - if include != nil && !include(pod) { - continue - } + // If an include function is provided, only keep pods that return true + if include != nil && !include(pod) { + continue + } - // Handle container targeting - if target.Container != "" { - k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchedContainer bool - - // Check the status of initContainers for a running match - for _, initContainer := range pod.Status.InitContainerStatuses { - isRunning := initContainer.State.Running != nil - if isRunning && initContainer.Name == target.Container { - // On running match in initContainer break this loop - matchedContainer = true - readyPods = append(readyPods, pod) - break - } - } - if matchedContainer { + // Handle container targeting + if target.Container != "" { + k.Log("Testing pod %q for container %q", pod.Name, target.Container) + var matchedContainer bool + + // Check the status of initContainers for a running match + for _, initContainer := range pod.Status.InitContainerStatuses { + isRunning := initContainer.State.Running != nil + if isRunning && initContainer.Name == target.Container { + // On running match in initContainer break this loop + matchedContainer = true + readyPods = append(readyPods, pod) break } + } + if matchedContainer { + break + } - // Check the status of regular containers for a running match - for _, container := range pod.Status.ContainerStatuses { - isRunning := container.State.Running != nil - if isRunning && container.Name == target.Container { - matchedContainer = true - readyPods = append(readyPods, pod) - break - } - } - if matchedContainer { - break - } - } else { - status := pod.Status.Phase - k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) - // Regular status checking without a container - if status == corev1.PodRunning { + // Check the status of regular containers for a running match + for _, container := range pod.Status.ContainerStatuses { + isRunning 
:= container.State.Running != nil + if isRunning && container.Name == target.Container { + matchedContainer = true readyPods = append(readyPods, pod) break } } + if matchedContainer { + break + } + } else { + status := pod.Status.Phase + k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) + // Regular status checking without a container + if status == corev1.PodRunning { + readyPods = append(readyPods, pod) + break + } } - if len(readyPods) > 0 { - return readyPods - } + } + if len(readyPods) > 0 { + return readyPods + } + + select { + case <-waitCtx.Done(): + k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) + return nil + case <-time.After(3 * time.Second): } } } From 0c4fca65a7034c8d2fc27d655b1f52e215a5bcab Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 13:04:06 -0500 Subject: [PATCH 37/62] Do not wait before checking for a service account --- src/pkg/k8s/sa.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/pkg/k8s/sa.go b/src/pkg/k8s/sa.go index 33eaaf4bc7..759ae799da 100644 --- a/src/pkg/k8s/sa.go +++ b/src/pkg/k8s/sa.go @@ -40,18 +40,21 @@ func (k *K8s) UpdateServiceAccount(ctx context.Context, svcAccount *corev1.Servi // WaitForServiceAccount waits for a service account to be created in the cluster. func (k *K8s) WaitForServiceAccount(ctx context.Context, ns, name string) (*corev1.ServiceAccount, error) { for { + sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + k.Log("Service account %s/%s not found, retrying...", ns, name) + } else { + return nil, fmt.Errorf("error getting service account %s/%s: %w", ns, name, err) + } + } else { + return sa, nil + } + select { case <-ctx.Done(): return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist: %w", ns, name, ctx.Err()) case <-time.After(1 * time.Second): - sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - continue - } - return nil, fmt.Errorf("error getting service account %s/%s: %w", ns, name, err) - } - return sa, nil } } } From 3eb9a5b6add49a1db14f94faff5c93a31afcbabe Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Fri, 19 Apr 2024 13:08:35 -0500 Subject: [PATCH 38/62] Do not set timeout to fetch deployed pkg in LoadPackageMetadata() --- src/pkg/packager/sources/cluster.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pkg/packager/sources/cluster.go b/src/pkg/packager/sources/cluster.go index 80f6b6007a..4f2ee9130a 100644 --- a/src/pkg/packager/sources/cluster.go +++ b/src/pkg/packager/sources/cluster.go @@ -60,8 +60,7 @@ func (s *ClusterSource) Collect(_ string) (string, error) { // LoadPackageMetadata loads package metadata from a cluster. 
func (s *ClusterSource) LoadPackageMetadata(dst *layout.PackagePaths, _ bool, _ bool) (types.ZarfPackage, []string, error) { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) - defer cancel() + ctx := context.Background() dpkg, err := s.GetDeployedPackage(ctx, s.PackageSource) if err != nil { From 170d219b9a8739f2db92e87581b6941b6adbeaed Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Mon, 22 Apr 2024 15:03:30 -0500 Subject: [PATCH 39/62] Use timer in select statements rather than time.After() --- src/pkg/cluster/injector.go | 77 ++++++++++---------- src/pkg/cluster/zarf.go | 28 ++++---- src/pkg/k8s/common.go | 65 +++++++++-------- src/pkg/k8s/namespace.go | 15 ++-- src/pkg/k8s/pods.go | 138 +++++++++++++++++++----------------- src/pkg/k8s/sa.go | 22 +++--- src/pkg/k8s/tunnel.go | 14 ++-- 7 files changed, 195 insertions(+), 164 deletions(-) diff --git a/src/pkg/cluster/injector.go b/src/pkg/cluster/injector.go index 2fb721cb0b..dd2a918f98 100644 --- a/src/pkg/cluster/injector.go +++ b/src/pkg/cluster/injector.go @@ -440,54 +440,57 @@ func (c *Cluster) buildInjectionPod(node, image string, payloadConfigmaps []stri func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeMap, error) { result := make(imageNodeMap) - for { - pods, err := c.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fmt.Sprintf("status.phase=%s", corev1.PodRunning), - }) - if err != nil { - return nil, fmt.Errorf("unable to get the list of %q pods in the cluster: %w", corev1.PodRunning, err) - } + timer := time.NewTimer(0) + defer timer.Stop() - for _, pod := range pods.Items { - nodeName := pod.Spec.NodeName - - nodeDetails, err := c.GetNode(ctx, nodeName) + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) + case <-timer.C: + pods, err := c.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("status.phase=%s", corev1.PodRunning), + }) if err != nil { - return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) + return nil, fmt.Errorf("unable to get the list of %q pods in the cluster: %w", corev1.PodRunning, err) } - if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || - nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { - continue - } + for _, pod := range pods.Items { + nodeName := pod.Spec.NodeName - for _, taint := range nodeDetails.Spec.Taints { - if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { + nodeDetails, err := c.GetNode(ctx, nodeName) + if err != nil { + return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) + } + + if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || + nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { continue } - } - for _, container := range pod.Spec.InitContainers { - result[container.Image] = append(result[container.Image], nodeName) - } - for _, container := range pod.Spec.Containers { - result[container.Image] = append(result[container.Image], nodeName) - } - for _, container := range pod.Spec.EphemeralContainers { - result[container.Image] = append(result[container.Image], nodeName) - } - } + for _, taint := range nodeDetails.Spec.Taints { + if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { + continue + } + } - if len(result) > 0 { - return result, nil - } + for _, container := range pod.Spec.InitContainers { 
+ result[container.Image] = append(result[container.Image], nodeName) + } + for _, container := range pod.Spec.Containers { + result[container.Image] = append(result[container.Image], nodeName) + } + for _, container := range pod.Spec.EphemeralContainers { + result[container.Image] = append(result[container.Image], nodeName) + } + } - c.Log("No images found on any node. Retrying...") + if len(result) > 0 { + return result, nil + } - select { - case <-ctx.Done(): - return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) - case <-time.After(2 * time.Second): + c.Log("No images found on any node. Retrying...") + timer.Reset(2 * time.Second) } } } diff --git a/src/pkg/cluster/zarf.go b/src/pkg/cluster/zarf.go index a37c4f5759..bc66a0000c 100644 --- a/src/pkg/cluster/zarf.go +++ b/src/pkg/cluster/zarf.go @@ -147,22 +147,26 @@ func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types. spinner := message.NewProgressSpinner("Waiting for webhook %q to complete for component %q", hookName, component.Name) defer spinner.Stop() - for { - deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) - if err != nil { - return nil, err - } - - packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) - if !packageNeedsWait { - spinner.Success() - return deployedPackage, nil - } + timer := time.NewTimer(0) + defer timer.Stop() + for { select { case <-waitCtx.Done(): return nil, fmt.Errorf("timed out waiting for webhook %q to complete for component %q: %w", hookName, component.Name, waitCtx.Err()) - case <-time.After(1 * time.Second): + case <-timer.C: + deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) + if err != nil { + return nil, err + } + + packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) + if !packageNeedsWait { + spinner.Success() + return deployedPackage, nil + } + + timer.Reset(1 * time.Second) } } } diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index 19731f470c..91410d206e 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -49,44 +49,47 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { var nodes *v1.NodeList var pods *v1.PodList + timer := time.NewTimer(0) + defer timer.Stop() + for { - if k.RestConfig == nil || k.Clientset == nil { - config, clientset, err := connect() - if err != nil { - k.Log("Cluster connection not available yet: %w", err) - continue + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) + case <-timer.C: + if k.RestConfig == nil || k.Clientset == nil { + config, clientset, err := connect() + if err != nil { + k.Log("Cluster connection not available yet: %w", err) + continue + } + + k.RestConfig = config + k.Clientset = clientset } - k.RestConfig = config - k.Clientset = clientset - } - - // Make sure there is at least one running Node - nodes, err = k.GetNodes(ctx) - if err != nil || len(nodes.Items) < 1 { - k.Log("No nodes reporting healthy yet: %#v\n", err) - continue - } - - // Get the cluster pod list - if pods, err = k.GetAllPods(ctx); err != nil { - k.Log("Could not get the pod list: %w", err) - continue - } + // Make sure there is at least one running Node + nodes, err = k.GetNodes(ctx) + if err != nil || len(nodes.Items) < 1 { + k.Log("No nodes reporting healthy yet: %#v\n", err) + continue + } - // Check that at least one pod is in the 'succeeded' or 'running' state - for _, pod := range pods.Items { - if 
pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { - return nil + // Get the cluster pod list + if pods, err = k.GetAllPods(ctx); err != nil { + k.Log("Could not get the pod list: %w", err) + continue } - } - k.Log("No pods reported 'succeeded' or 'running' state yet.") + // Check that at least one pod is in the 'succeeded' or 'running' state + for _, pod := range pods.Items { + if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { + return nil + } + } - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) - case <-time.After(1 * time.Second): + k.Log("No pods reported 'succeeded' or 'running' state yet.") + timer.Reset(1 * time.Second) } } } diff --git a/src/pkg/k8s/namespace.go b/src/pkg/k8s/namespace.go index db54fcbc5c..3a63b1ac52 100644 --- a/src/pkg/k8s/namespace.go +++ b/src/pkg/k8s/namespace.go @@ -49,15 +49,20 @@ func (k *K8s) DeleteNamespace(ctx context.Context, name string) error { return err } + timer := time.NewTimer(0) + defer timer.Stop() + for { - _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } select { case <-ctx.Done(): return ctx.Err() - case <-time.After(1 * time.Second): + case <-timer.C: + _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + timer.Reset(1 * time.Second) } } } diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index daea645eca..d703b887a7 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -44,16 +44,20 @@ func (k *K8s) DeletePod(ctx context.Context, namespace string, name string) erro return err } - for { - _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } + timer := time.NewTimer(0) + defer timer.Stop() + for { select { case <-ctx.Done(): return ctx.Err() - case <-time.After(1 * time.Second): + case <-timer.C: + _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + timer.Reset(1 * time.Second) } } } @@ -97,82 +101,86 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in waitCtx, cancel := context.WithTimeout(ctx, 90*time.Second) defer cancel() + timer := time.NewTimer(0) + defer timer.Stop() + for { - pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ - LabelSelector: target.Selector, - }) - if err != nil { - k.Log("Unable to find matching pods: %w", err) + select { + case <-waitCtx.Done(): + k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) return nil - } + case <-timer.C: + pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ + LabelSelector: target.Selector, + }) + if err != nil { + k.Log("Unable to find matching pods: %w", err) + return nil + } - k.Log("Found %d pods for target %#v", len(pods.Items), target) + k.Log("Found %d pods for target %#v", len(pods.Items), target) - var readyPods = []corev1.Pod{} + var readyPods = []corev1.Pod{} - // Sort the pods from newest to oldest - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) - }) + // Sort the pods from newest to oldest + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) - for _, pod := range pods.Items { - k.Log("Testing pod 
%q", pod.Name) + for _, pod := range pods.Items { + k.Log("Testing pod %q", pod.Name) - // If an include function is provided, only keep pods that return true - if include != nil && !include(pod) { - continue - } + // If an include function is provided, only keep pods that return true + if include != nil && !include(pod) { + continue + } - // Handle container targeting - if target.Container != "" { - k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchedContainer bool - - // Check the status of initContainers for a running match - for _, initContainer := range pod.Status.InitContainerStatuses { - isRunning := initContainer.State.Running != nil - if isRunning && initContainer.Name == target.Container { - // On running match in initContainer break this loop - matchedContainer = true - readyPods = append(readyPods, pod) + // Handle container targeting + if target.Container != "" { + k.Log("Testing pod %q for container %q", pod.Name, target.Container) + var matchedContainer bool + + // Check the status of initContainers for a running match + for _, initContainer := range pod.Status.InitContainerStatuses { + isRunning := initContainer.State.Running != nil + if isRunning && initContainer.Name == target.Container { + // On running match in initContainer break this loop + matchedContainer = true + readyPods = append(readyPods, pod) + break + } + } + if matchedContainer { break } - } - if matchedContainer { - break - } - // Check the status of regular containers for a running match - for _, container := range pod.Status.ContainerStatuses { - isRunning := container.State.Running != nil - if isRunning && container.Name == target.Container { - matchedContainer = true + // Check the status of regular containers for a running match + for _, container := range pod.Status.ContainerStatuses { + isRunning := container.State.Running != nil + if isRunning && container.Name == target.Container { + matchedContainer = true + readyPods = append(readyPods, pod) + break + } + } + if matchedContainer { + break + } + } else { + status := pod.Status.Phase + k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) + // Regular status checking without a container + if status == corev1.PodRunning { readyPods = append(readyPods, pod) break } } - if matchedContainer { - break - } - } else { - status := pod.Status.Phase - k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) - // Regular status checking without a container - if status == corev1.PodRunning { - readyPods = append(readyPods, pod) - break - } } - } - if len(readyPods) > 0 { - return readyPods - } + if len(readyPods) > 0 { + return readyPods + } - select { - case <-waitCtx.Done(): - k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) - return nil - case <-time.After(3 * time.Second): + timer.Reset(3 * time.Second) } } } diff --git a/src/pkg/k8s/sa.go b/src/pkg/k8s/sa.go index 759ae799da..93545cec96 100644 --- a/src/pkg/k8s/sa.go +++ b/src/pkg/k8s/sa.go @@ -39,22 +39,26 @@ func (k *K8s) UpdateServiceAccount(ctx context.Context, svcAccount *corev1.Servi // WaitForServiceAccount waits for a service account to be created in the cluster. 
func (k *K8s) WaitForServiceAccount(ctx context.Context, ns, name string) (*corev1.ServiceAccount, error) { + timer := time.NewTimer(0) + defer timer.Stop() + for { - sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) - if err != nil { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist: %w", ns, name, ctx.Err()) + case <-timer.C: + sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + if errors.IsNotFound(err) { k.Log("Service account %s/%s not found, retrying...", ns, name) } else { return nil, fmt.Errorf("error getting service account %s/%s: %w", ns, name, err) } - } else { - return sa, nil - } - select { - case <-ctx.Done(): - return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist: %w", ns, name, ctx.Err()) - case <-time.After(1 * time.Second): + timer.Reset(1 * time.Second) } } } diff --git a/src/pkg/k8s/tunnel.go b/src/pkg/k8s/tunnel.go index 8618156eb0..6c4f46e0cf 100644 --- a/src/pkg/k8s/tunnel.go +++ b/src/pkg/k8s/tunnel.go @@ -97,15 +97,19 @@ func (tunnel *Tunnel) Connect(ctx context.Context) (string, error) { tunnel.kube.Log("%s", err.Error()) tunnel.kube.Log("Delay creating tunnel, waiting %d seconds...", delay) + timer := time.NewTimer(0) + defer timer.Stop() + select { case <-ctx.Done(): return "", ctx.Err() - case <-time.After(time.Duration(delay) * time.Second): - } + case <-timer.C: + url, err = tunnel.Connect(ctx) + if err != nil { + return "", err + } - url, err = tunnel.Connect(ctx) - if err != nil { - return "", err + timer.Reset(time.Duration(delay) * time.Second) } } From 8f01e2787a0cfa91dc1fc0c979e6067ab2c65208 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Mon, 22 Apr 2024 16:04:10 -0500 Subject: [PATCH 40/62] Use timer.Reset() rather than continue in WaitForHealthyCluster() --- src/pkg/k8s/common.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index 91410d206e..7181bfc045 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -61,7 +61,7 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { config, clientset, err := connect() if err != nil { k.Log("Cluster connection not available yet: %w", err) - continue + timer.Reset(1 * time.Second) } k.RestConfig = config @@ -71,14 +71,14 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { // Make sure there is at least one running Node nodes, err = k.GetNodes(ctx) if err != nil || len(nodes.Items) < 1 { - k.Log("No nodes reporting healthy yet: %#v\n", err) - continue + k.Log("No nodes reporting healthy yet: %v\n", err) + timer.Reset(1 * time.Second) } // Get the cluster pod list if pods, err = k.GetAllPods(ctx); err != nil { k.Log("Could not get the pod list: %w", err) - continue + timer.Reset(1 * time.Second) } // Check that at least one pod is in the 'succeeded' or 'running' state From c9c6db043d4e6ae757c805bbdae070071be0e784 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 23 Apr 2024 10:52:39 -0500 Subject: [PATCH 41/62] Update src/pkg/k8s/sa.go Co-authored-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- src/pkg/k8s/sa.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pkg/k8s/sa.go b/src/pkg/k8s/sa.go index 93545cec96..38b7624130 100644 --- a/src/pkg/k8s/sa.go +++ b/src/pkg/k8s/sa.go @@ -45,7 +45,7 @@ func (k *K8s) WaitForServiceAccount(ctx context.Context, 
ns, name string) (*core for { select { case <-ctx.Done(): - return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist: %w", ns, name, ctx.Err()) + return nil, fmt.Errorf("failed to get service account %s/%s: %w", ns, name, ctx.Err()) case <-timer.C: sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) if err == nil { From 06452bfc2dfeb110347a5e3b3c7b01ec2e64449a Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 23 Apr 2024 10:52:52 -0500 Subject: [PATCH 42/62] Update src/pkg/k8s/pods.go Co-authored-by: Austin Abro <37223396+AustinAbro321@users.noreply.github.com> --- src/pkg/k8s/pods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index d703b887a7..d8c9c6d323 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -107,7 +107,7 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in for { select { case <-waitCtx.Done(): - k.Log("Pod lookup timeout or context cancelled: %w", ctx.Err()) + k.Log("Pod lookup failed: %w", ctx.Err()) return nil case <-timer.C: pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ From 1bfc32592395bcdbdcfd4537fe57527b4ea178bf Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 23 Apr 2024 10:55:38 -0500 Subject: [PATCH 43/62] Do not use timed out phrase in ctx.Done() cases --- src/pkg/cluster/zarf.go | 2 +- src/pkg/k8s/common.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pkg/cluster/zarf.go b/src/pkg/cluster/zarf.go index bc66a0000c..3522dde6b5 100644 --- a/src/pkg/cluster/zarf.go +++ b/src/pkg/cluster/zarf.go @@ -153,7 +153,7 @@ func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types. for { select { case <-waitCtx.Done(): - return nil, fmt.Errorf("timed out waiting for webhook %q to complete for component %q: %w", hookName, component.Name, waitCtx.Err()) + return nil, fmt.Errorf("error waiting for webhook %q to complete for component %q: %w", hookName, component.Name, waitCtx.Err()) case <-timer.C: deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) if err != nil { diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index 7181bfc045..a5ae58417e 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -55,7 +55,7 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { for { select { case <-ctx.Done(): - return fmt.Errorf("timed out waiting for cluster to report healthy: %w", ctx.Err()) + return fmt.Errorf("error waiting for cluster to report healthy: %w", ctx.Err()) case <-timer.C: if k.RestConfig == nil || k.Clientset == nil { config, clientset, err := connect() From 9b961ba9428c635534e455ee5843ce4f5315d4f8 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 23 Apr 2024 11:16:50 -0500 Subject: [PATCH 44/62] Use timer.Reset() and continue in WaitForHealthyCluster() --- src/pkg/k8s/common.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index a5ae58417e..6b80384aef 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -45,9 +45,11 @@ func New(logger Log, defaultLabels Labels) (*K8s, error) { // WaitForHealthyCluster checks for an available K8s cluster every second until timeout. 
func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { - var err error - var nodes *v1.NodeList - var pods *v1.PodList + var ( + err error + nodes *v1.NodeList + pods *v1.PodList + ) timer := time.NewTimer(0) defer timer.Stop() @@ -62,6 +64,7 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { if err != nil { k.Log("Cluster connection not available yet: %w", err) timer.Reset(1 * time.Second) + continue } k.RestConfig = config @@ -73,12 +76,14 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { if err != nil || len(nodes.Items) < 1 { k.Log("No nodes reporting healthy yet: %v\n", err) timer.Reset(1 * time.Second) + continue } // Get the cluster pod list if pods, err = k.GetAllPods(ctx); err != nil { k.Log("Could not get the pod list: %w", err) timer.Reset(1 * time.Second) + continue } // Check that at least one pod is in the 'succeeded' or 'running' state From 74d39dbc145bd3ac1229cb28b13954fec7c87e78 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 23 Apr 2024 12:41:57 -0500 Subject: [PATCH 45/62] Use const for waitDuration in WaitForHealthyCluster() --- src/pkg/k8s/common.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index 6b80384aef..44027f4492 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -51,6 +51,8 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { pods *v1.PodList ) + const waitDuration = 1 * time.Second + timer := time.NewTimer(0) defer timer.Stop() @@ -63,7 +65,7 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { config, clientset, err := connect() if err != nil { k.Log("Cluster connection not available yet: %w", err) - timer.Reset(1 * time.Second) + timer.Reset(waitDuration) continue } @@ -75,14 +77,14 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { nodes, err = k.GetNodes(ctx) if err != nil || len(nodes.Items) < 1 { k.Log("No nodes reporting healthy yet: %v\n", err) - timer.Reset(1 * time.Second) + timer.Reset(waitDuration) continue } // Get the cluster pod list if pods, err = k.GetAllPods(ctx); err != nil { k.Log("Could not get the pod list: %w", err) - timer.Reset(1 * time.Second) + timer.Reset(waitDuration) continue } @@ -94,7 +96,7 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { } k.Log("No pods reported 'succeeded' or 'running' state yet.") - timer.Reset(1 * time.Second) + timer.Reset(waitDuration) } } } From 5be68a500dbc33c26be8b16e9f188ef3b3b9af11 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 25 Apr 2024 17:51:53 -0500 Subject: [PATCH 46/62] dogsled stderr --- src/test/e2e/00_use_cli_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/e2e/00_use_cli_test.go b/src/test/e2e/00_use_cli_test.go index 10faf56964..fce9a095dc 100644 --- a/src/test/e2e/00_use_cli_test.go +++ b/src/test/e2e/00_use_cli_test.go @@ -241,8 +241,8 @@ func TestUseCLI(t *testing.T) { require.Contains(t, stdOut, "renamed-item") // Test that yq ea can be used properly - _, stdErr, err = e2e.Zarf("tools", "yq", "eval-all", "-i", `. as $doc ireduce ({}; .items += $doc.items)`, file, otherFile) - require.NoError(t, err, stdErr) + _, _, err = e2e.Zarf("tools", "yq", "eval-all", "-i", `. 
as $doc ireduce ({}; .items += $doc.items)`, file, otherFile) + require.NoError(t, err) stdOut, _, err = e2e.Zarf("tools", "yq", "e", ".items | length", file) require.NoError(t, err) require.Equal(t, "4\n", stdOut) From 6a53c92f88ef246cfe7985d829014d9b203ee65c Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 25 Apr 2024 17:53:31 -0500 Subject: [PATCH 47/62] sigh --- src/test/e2e/00_use_cli_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test/e2e/00_use_cli_test.go b/src/test/e2e/00_use_cli_test.go index fce9a095dc..6b5a0e387c 100644 --- a/src/test/e2e/00_use_cli_test.go +++ b/src/test/e2e/00_use_cli_test.go @@ -234,8 +234,6 @@ func TestUseCLI(t *testing.T) { // Test that yq can eval properly _, stdErr, err := e2e.Zarf("tools", "yq", "eval", "-i", `.items[1].name = "renamed-item"`, file) require.NoError(t, err, stdErr) - _, _, err = e2e.Zarf("tools", "yq", ".items[1].name", file) - require.NoError(t, err) stdOut, _, err := e2e.Zarf("tools", "yq", ".items[1].name", file) require.NoError(t, err) require.Contains(t, stdOut, "renamed-item") From 6396af2ffdf6539f48cef90033ed4e862d16a6da Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Tue, 7 May 2024 15:32:03 -0500 Subject: [PATCH 48/62] Use single, global context for parent and child commands --- src/cmd/connect.go | 9 ++++----- src/cmd/destroy.go | 5 ++--- src/cmd/dev.go | 5 ++--- src/cmd/initialize.go | 5 ++--- src/cmd/internal.go | 13 ++++++------- src/cmd/package.go | 21 ++++++++++----------- src/cmd/root.go | 5 +++++ src/cmd/tools/crane.go | 9 ++++----- src/cmd/tools/zarf.go | 7 +++---- zarf-config.toml | 2 +- 10 files changed, 39 insertions(+), 42 deletions(-) diff --git a/src/cmd/connect.go b/src/cmd/connect.go index 0fbfc9d961..8bfc347349 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "fmt" "os" "os/signal" @@ -33,7 +32,7 @@ var ( Aliases: []string{"c"}, Short: lang.CmdConnectShort, Long: lang.CmdConnectLong, - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { var target string if len(args) > 0 { target = args[0] @@ -44,7 +43,7 @@ var ( spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error()) } - ctx := context.Background() + ctx := cmd.Context() var tunnel *k8s.Tunnel if connectResourceName != "" { @@ -93,8 +92,8 @@ var ( Use: "list", Aliases: []string{"l"}, Short: lang.CmdConnectListShort, - Run: func(_ *cobra.Command, _ []string) { - ctx := context.Background() + Run: func(cmd *cobra.Command, _ []string) { + ctx := cmd.Context() if err := common.NewClusterOrDie().PrintConnectTable(ctx); err != nil { message.Fatal(err, err.Error()) } diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index d9eeb3df40..826ce792fc 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "errors" "os" "regexp" @@ -29,9 +28,9 @@ var destroyCmd = &cobra.Command{ Aliases: []string{"d"}, Short: lang.CmdDestroyShort, Long: lang.CmdDestroyLong, - Run: func(_ *cobra.Command, _ []string) { + Run: func(cmd *cobra.Command, _ []string) { c := common.NewClusterOrDie() - ctx := context.Background() + ctx := cmd.Context() // NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig) // there will be no zarf-state to load and the struct will be empty. 
In these cases, if we can find diff --git a/src/cmd/dev.go b/src/cmd/dev.go index 65560e8452..89b46508f5 100644 --- a/src/cmd/dev.go +++ b/src/cmd/dev.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "fmt" "io" "os" @@ -41,7 +40,7 @@ var devDeployCmd = &cobra.Command{ Args: cobra.MaximumNArgs(1), Short: lang.CmdDevDeployShort, Long: lang.CmdDevDeployLong, - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { pkgConfig.CreateOpts.BaseDir = common.SetBaseDirectory(args) v := common.GetViper() @@ -54,7 +53,7 @@ var devDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx := context.Background() + ctx := cmd.Context() if err := pkgClient.DevDeploy(ctx); err != nil { message.Fatalf(err, lang.CmdDevDeployErr, err.Error()) diff --git a/src/cmd/initialize.go b/src/cmd/initialize.go index f076e05536..b41c3ca8d9 100644 --- a/src/cmd/initialize.go +++ b/src/cmd/initialize.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "errors" "fmt" "os" @@ -36,7 +35,7 @@ var initCmd = &cobra.Command{ Short: lang.CmdInitShort, Long: lang.CmdInitLong, Example: lang.CmdInitExample, - Run: func(_ *cobra.Command, _ []string) { + Run: func(cmd *cobra.Command, _ []string) { zarfLogo := message.GetLogo() _, _ = fmt.Fprintln(os.Stderr, zarfLogo) @@ -66,7 +65,7 @@ var initCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - ctx := context.Background() + ctx := cmd.Context() err = pkgClient.Deploy(ctx) if err != nil { diff --git a/src/cmd/internal.go b/src/cmd/internal.go index 2781df50c8..40ae382f84 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "encoding/json" "fmt" "os" @@ -194,8 +193,8 @@ var createReadOnlyGiteaUser = &cobra.Command{ Use: "create-read-only-gitea-user", Short: lang.CmdInternalCreateReadOnlyGiteaUserShort, Long: lang.CmdInternalCreateReadOnlyGiteaUserLong, - Run: func(_ *cobra.Command, _ []string) { - ctx := context.Background() + Run: func(cmd *cobra.Command, _ []string) { + ctx := cmd.Context() // Load the state so we can get the credentials for the admin git user state, err := common.NewClusterOrDie().LoadZarfState(ctx) @@ -214,9 +213,9 @@ var createPackageRegistryToken = &cobra.Command{ Use: "create-artifact-registry-token", Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, - Run: func(_ *cobra.Command, _ []string) { + Run: func(cmd *cobra.Command, _ []string) { c := common.NewClusterOrDie() - ctx := context.Background() + ctx := cmd.Context() state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) @@ -242,8 +241,8 @@ var updateGiteaPVC = &cobra.Command{ Use: "update-gitea-pvc", Short: lang.CmdInternalUpdateGiteaPVCShort, Long: lang.CmdInternalUpdateGiteaPVCLong, - Run: func(_ *cobra.Command, _ []string) { - ctx := context.Background() + Run: func(cmd *cobra.Command, _ []string) { + ctx := cmd.Context() // There is a possibility that the pvc does not yet exist and Gitea helm chart should create it helmShouldCreate, err := git.UpdateGiteaPVC(ctx, rollback) diff --git a/src/cmd/package.go b/src/cmd/package.go index 7b46474f02..07cda41159 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -5,7 +5,6 @@ package cmd import ( - "context" "fmt" "path/filepath" "regexp" @@ -68,7 +67,7 @@ var packageDeployCmd = &cobra.Command{ Short: lang.CmdPackageDeployShort, Long: 
lang.CmdPackageDeployLong, Args: cobra.MaximumNArgs(1), - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { pkgConfig.PkgOpts.PackageSource = choosePackage(args) v := common.GetViper() @@ -78,7 +77,7 @@ var packageDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx := context.Background() + ctx := cmd.Context() if err := pkgClient.Deploy(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) @@ -93,13 +92,13 @@ var packageMirrorCmd = &cobra.Command{ Long: lang.CmdPackageMirrorLong, Example: lang.CmdPackageMirrorExample, Args: cobra.MaximumNArgs(1), - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { pkgConfig.PkgOpts.PackageSource = choosePackage(args) pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx := context.Background() + ctx := cmd.Context() if err := pkgClient.Mirror(ctx); err != nil { message.Fatalf(err, lang.CmdPackageDeployErr, err.Error()) @@ -132,8 +131,8 @@ var packageListCmd = &cobra.Command{ Use: "list", Aliases: []string{"l", "ls"}, Short: lang.CmdPackageListShort, - Run: func(_ *cobra.Command, _ []string) { - ctx := context.Background() + Run: func(cmd *cobra.Command, _ []string) { + ctx := cmd.Context() deployedZarfPackages, errs := common.NewClusterOrDie().GetDeployedZarfPackages(ctx) if len(errs) > 0 && len(deployedZarfPackages) == 0 { message.Fatalf(errs, lang.CmdPackageListNoPackageWarn) @@ -169,7 +168,7 @@ var packageRemoveCmd = &cobra.Command{ Aliases: []string{"u", "rm"}, Args: cobra.MaximumNArgs(1), Short: lang.CmdPackageRemoveShort, - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { pkgConfig.PkgOpts.PackageSource = choosePackage(args) src := identifyAndFallbackToClusterSource() @@ -177,7 +176,7 @@ var packageRemoveCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src)) defer pkgClient.ClearTempPaths() - ctx := context.Background() + ctx := cmd.Context() if err := pkgClient.Remove(ctx); err != nil { message.Fatalf(err, lang.CmdPackageRemoveErr, err.Error()) @@ -278,7 +277,7 @@ func identifyAndFallbackToClusterSource() (src sources.PackageSource) { return src } -func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { +func getPackageCompletionArgs(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { var pkgCandidates []string c, err := cluster.NewCluster() @@ -286,7 +285,7 @@ func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, return pkgCandidates, cobra.ShellCompDirectiveDefault } - ctx := context.Background() + ctx := cmd.Context() deployedZarfPackages, _ := c.GetDeployedZarfPackages(ctx) // Populate list of package names diff --git a/src/cmd/root.go b/src/cmd/root.go index 9563f382d3..83fb28e61d 100644 --- a/src/cmd/root.go +++ b/src/cmd/root.go @@ -5,6 +5,7 @@ package cmd import ( + "context" "fmt" "os" "strings" @@ -37,6 +38,10 @@ var rootCmd = &cobra.Command{ config.SkipLogFile = true } + // Set the global context for the root command and all child commands + ctx := context.Background() + cmd.SetContext(ctx) + common.SetupCLI() }, Short: lang.RootCmdShort, diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go index d4fde65413..8257a806d1 100644 --- a/src/cmd/tools/crane.go +++ b/src/cmd/tools/crane.go @@ -5,7 +5,6 @@ package tools import ( - "context" "fmt" "os" 
"strings" @@ -124,7 +123,7 @@ func zarfCraneCatalog(cranePlatformOptions *[]crane.Option) *cobra.Command { return err } - ctx := context.Background() + ctx := cmd.Context() zarfState, err := c.LoadZarfState(ctx) if err != nil { @@ -174,7 +173,7 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command message.Note(lang.CmdToolsRegistryZarfState) - ctx := context.Background() + ctx := cmd.Context() zarfState, err := c.LoadZarfState(ctx) if err != nil { @@ -213,14 +212,14 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command return wrappedCommand } -func pruneImages(_ *cobra.Command, _ []string) error { +func pruneImages(cmd *cobra.Command, _ []string) error { // Try to connect to a Zarf initialized cluster c, err := cluster.NewCluster() if err != nil { return err } - ctx := context.Background() + ctx := cmd.Context() zarfState, err := c.LoadZarfState(ctx) if err != nil { diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go index 77763512aa..130be78212 100644 --- a/src/cmd/tools/zarf.go +++ b/src/cmd/tools/zarf.go @@ -5,7 +5,6 @@ package tools import ( - "context" "fmt" "os" @@ -51,8 +50,8 @@ var getCredsCmd = &cobra.Command{ Example: lang.CmdToolsGetCredsExample, Aliases: []string{"gc"}, Args: cobra.MaximumNArgs(1), - Run: func(_ *cobra.Command, args []string) { - ctx := context.Background() + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() state, err := common.NewClusterOrDie().LoadZarfState(ctx) if err != nil || state.Distro == "" { // If no distro the zarf secret did not load properly @@ -87,7 +86,7 @@ var updateCredsCmd = &cobra.Command{ } c := common.NewClusterOrDie() - ctx := context.Background() + ctx := cmd.Context() oldState, err := c.LoadZarfState(ctx) if err != nil || oldState.Distro == "" { // If no distro the zarf secret did not load properly diff --git a/zarf-config.toml b/zarf-config.toml index 4566da627e..19cefd4686 100644 --- a/zarf-config.toml +++ b/zarf-config.toml @@ -7,7 +7,7 @@ agent_image_tag = 'local' # Tag for the zarf injector binary to use injector_version = '2024-05-03' injector_amd64_shasum = 'e5a3d380bac4bf6c68ba18275d6a92bb002e86c116eb364f960d393fd2f44da8' -injector_arm64_shasum = 'f7f26e37a514f2ca36d795b7611d64491398a4bdc424e0045875af391dc28659' +injector_arm64_shasum = '866b5b1911dd920618be55164c4f95abe05753932ac6d0f2270d26e569d797a2' # The image reference to use for the registry that Zarf deploys into the cluster registry_image_domain = '' From 29d2367fe125587b961a83bd6c41040006bee932 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 01:38:05 -0500 Subject: [PATCH 49/62] Undo trivial lint fix --- src/cmd/tools/helm/root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/tools/helm/root.go b/src/cmd/tools/helm/root.go index 8f18a91827..e496a93cec 100644 --- a/src/cmd/tools/helm/root.go +++ b/src/cmd/tools/helm/root.go @@ -156,7 +156,7 @@ func NewRootCmd(actionConfig *action.Configuration, out io.Writer, args []string // This call is required to gather configuration information prior to // execution. 
flags.ParseErrorsWhitelist.UnknownFlags = true - _ = flags.Parse(args) + flags.Parse(args) registryClient, err := newDefaultRegistryClient() if err != nil { From e98fc9a4346e8d374b40738dee569868872c0032 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 02:01:13 -0500 Subject: [PATCH 50/62] Implement PR feedback --- src/test/e2e/21_connect_creds_test.go | 4 ++-- src/test/e2e/22_git_and_gitops_test.go | 2 +- src/test/e2e/23_data_injection_test.go | 10 ++++++---- src/test/e2e/26_simple_packages_test.go | 2 +- src/test/e2e/30_config_file_test.go | 7 ++----- src/test/e2e/99_yolo_test.go | 2 +- 6 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go index f742e9e80a..a66d390f18 100644 --- a/src/test/e2e/21_connect_creds_test.go +++ b/src/test/e2e/21_connect_creds_test.go @@ -28,7 +28,7 @@ func TestConnectAndCreds(t *testing.T) { prevAgentSecretData, _, err := e2e.Kubectl("get", "secret", "agent-hook-tls", "-n", "zarf", "-o", "jsonpath={.data}") require.NoError(t, err) - ctx := context.TODO() + ctx := context.Background() connectToZarfServices(ctx, t) @@ -71,7 +71,7 @@ func TestMetrics(t *testing.T) { tunnel, err := c.NewTunnel("zarf", "svc", "agent-hook", "", 8888, 8443) require.NoError(t, err) - _, err = tunnel.Connect(context.TODO()) + _, err = tunnel.Connect(context.Background()) require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index cb657d48cf..11a09755eb 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -37,7 +37,7 @@ func TestGit(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - ctx := context.TODO() + ctx := context.Background() tunnelGit, err := c.Connect(ctx, cluster.ZarfGit) require.NoError(t, err) defer tunnelGit.Close() diff --git a/src/test/e2e/23_data_injection_test.go b/src/test/e2e/23_data_injection_test.go index ce986859ee..efbce9bc13 100644 --- a/src/test/e2e/23_data_injection_test.go +++ b/src/test/e2e/23_data_injection_test.go @@ -21,6 +21,8 @@ func TestDataInjection(t *testing.T) { t.Log("E2E: Data injection") e2e.SetupWithCluster(t) + ctx := context.Background() + path := fmt.Sprintf("build/zarf-package-kiwix-%s-3.5.0.tar", e2e.Arch) tmpdir := t.TempDir() @@ -28,7 +30,7 @@ func TestDataInjection(t *testing.T) { // Repeat the injection action 3 times to ensure the data injection is idempotent and doesn't fail to perform an upgrade for i := 0; i < 3; i++ { - runDataInjection(t, path) + runDataInjection(ctx, t, path) } // Verify the file and injection marker were created @@ -42,7 +44,7 @@ func TestDataInjection(t *testing.T) { // need target to equal svc that we are trying to connect to call checkForZarfConnectLabel c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect(context.TODO(), "kiwix") + tunnel, err := c.Connect(ctx, "kiwix") require.NoError(t, err) defer tunnel.Close() @@ -64,9 +66,9 @@ func TestDataInjection(t *testing.T) { require.FileExists(t, filepath.Join(sbomPath, "kiwix", "zarf-component-kiwix-serve.json"), "The data-injection component should have an SBOM json") } -func runDataInjection(t *testing.T, path string) { +func runDataInjection(ctx context.Context, t *testing.T, path string) { // Limit this deploy to 5 minutes - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute) + ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() // Deploy the 
data injection example diff --git a/src/test/e2e/26_simple_packages_test.go b/src/test/e2e/26_simple_packages_test.go index 25fa7115e9..c7a96e80d6 100644 --- a/src/test/e2e/26_simple_packages_test.go +++ b/src/test/e2e/26_simple_packages_test.go @@ -27,7 +27,7 @@ func TestDosGames(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect(context.TODO(), "doom") + tunnel, err := c.Connect(context.Background(), "doom") require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/e2e/30_config_file_test.go b/src/test/e2e/30_config_file_test.go index 31425de829..bf9b5e5988 100644 --- a/src/test/e2e/30_config_file_test.go +++ b/src/test/e2e/30_config_file_test.go @@ -6,7 +6,6 @@ package test import ( "fmt" - "os" "path/filepath" "testing" @@ -26,8 +25,7 @@ func TestConfigFile(t *testing.T) { e2e.CleanFiles(path) // Test the config file environment variable - os.Setenv("ZARF_CONFIG", filepath.Join(dir, config)) - defer os.Unsetenv("ZARF_CONFIG") + t.Setenv("ZARF_CONFIG", filepath.Join(dir, config)) configFileTests(t, dir, path) configFileDefaultTests(t) @@ -139,8 +137,7 @@ func configFileDefaultTests(t *testing.T) { } // Test remaining default initializers - os.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml")) - defer os.Unsetenv("ZARF_CONFIG") + t.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml")) // Test global flags stdOut, _, _ := e2e.Zarf("--help") diff --git a/src/test/e2e/99_yolo_test.go b/src/test/e2e/99_yolo_test.go index 1c843501a3..bce9ab1450 100644 --- a/src/test/e2e/99_yolo_test.go +++ b/src/test/e2e/99_yolo_test.go @@ -36,7 +36,7 @@ func TestYOLOMode(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect(context.TODO(), "doom") + tunnel, err := c.Connect(context.Background(), "doom") require.NoError(t, err) defer tunnel.Close() From 043f28e9a3c16c308b39169ab673d8b66b13617e Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 02:06:28 -0500 Subject: [PATCH 51/62] Use real context in bb test --- src/extensions/bigbang/test/bigbang_test.go | 32 +++++++++++---------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go index e6b78c5e23..d7e4edc173 100644 --- a/src/extensions/bigbang/test/bigbang_test.go +++ b/src/extensions/bigbang/test/bigbang_test.go @@ -60,45 +60,47 @@ func TestReleases(t *testing.T) { zarfCache = fmt.Sprintf("--zarf-cache=%s", CIMount) } + ctx := context.Background() + // Initialize the cluster with the Git server and AMD64 architecture arch := "amd64" - stdOut, stdErr, err := zarfExec("init", "--components", "git-server", "--architecture", arch, tmpdir, "--confirm", zarfCache) + stdOut, stdErr, err := zarfExec(ctx, "init", "--components", "git-server", "--architecture", arch, tmpdir, "--confirm", zarfCache) require.NoError(t, err, stdOut, stdErr) // Remove the init package to free up disk space on the test runner - err = os.RemoveAll(fmt.Sprintf("zarf-init-%s-%s.tar.zst", arch, getZarfVersion(t))) + err = os.RemoveAll(fmt.Sprintf("zarf-init-%s-%s.tar.zst", arch, getZarfVersion(ctx, t))) require.NoError(t, err) // Build the previous version bbVersion := fmt.Sprintf("--set=BB_VERSION=%s", previous) bbMajor := fmt.Sprintf("--set=BB_MAJOR=%s", previous[0:1]) - stdOut, stdErr, err = zarfExec("package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, tmpdir, "--confirm") + stdOut, stdErr, err 
= zarfExec(ctx, "package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, tmpdir, "--confirm") require.NoError(t, err, stdOut, stdErr) // Clean up zarf cache to reduce disk pressure - stdOut, stdErr, err = zarfExec("tools", "clear-cache") + stdOut, stdErr, err = zarfExec(ctx, "tools", "clear-cache") require.NoError(t, err, stdOut, stdErr) // Deploy the previous version pkgPath := fmt.Sprintf("zarf-package-big-bang-test-%s-%s.tar.zst", arch, previous) - stdOut, stdErr, err = zarfExec("package", "deploy", pkgPath, tmpdir, "--confirm") + stdOut, stdErr, err = zarfExec(ctx, "package", "deploy", pkgPath, tmpdir, "--confirm") require.NoError(t, err, stdOut, stdErr) // HACK: scale down the flux deployments due to very-low CPU in the test runner fluxControllers := []string{"helm-controller", "source-controller", "kustomize-controller", "notification-controller"} for _, deployment := range fluxControllers { - stdOut, stdErr, err = zarfExec("tools", "kubectl", "-n", "flux-system", "scale", "deployment", deployment, "--replicas=0") + stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "-n", "flux-system", "scale", "deployment", deployment, "--replicas=0") require.NoError(t, err, stdOut, stdErr) } // Cluster info - stdOut, stdErr, err = zarfExec("tools", "kubectl", "describe", "nodes") + stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "describe", "nodes") require.NoError(t, err, stdOut, stdErr) // Build the latest version bbVersion = fmt.Sprintf("--set=BB_VERSION=%s", latest) bbMajor = fmt.Sprintf("--set=BB_MAJOR=%s", latest[0:1]) - stdOut, stdErr, err = zarfExec("package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, "--differential", pkgPath, tmpdir, "--confirm") + stdOut, stdErr, err = zarfExec(ctx, "package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, "--differential", pkgPath, tmpdir, "--confirm") require.NoError(t, err, stdOut, stdErr) // Remove the previous version package @@ -106,16 +108,16 @@ func TestReleases(t *testing.T) { require.NoError(t, err) // Clean up zarf cache to reduce disk pressure - stdOut, stdErr, err = zarfExec("tools", "clear-cache") + stdOut, stdErr, err = zarfExec(ctx, "tools", "clear-cache") require.NoError(t, err, stdOut, stdErr) // Deploy the latest version pkgPath = fmt.Sprintf("zarf-package-big-bang-test-%s-%s-differential-%s.tar.zst", arch, previous, latest) - stdOut, stdErr, err = zarfExec("package", "deploy", pkgPath, tmpdir, "--confirm") + stdOut, stdErr, err = zarfExec(ctx, "package", "deploy", pkgPath, tmpdir, "--confirm") require.NoError(t, err, stdOut, stdErr) // Cluster info - stdOut, stdErr, err = zarfExec("tools", "kubectl", "describe", "nodes") + stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "describe", "nodes") require.NoError(t, err, stdOut, stdErr) // Test connectivity to Twistlock @@ -140,14 +142,14 @@ func testConnection(ctx context.Context, t *testing.T) { require.Equal(t, 200, resp.StatusCode) } -func zarfExec(args ...string) (string, string, error) { - return exec.CmdWithContext(context.TODO(), exec.PrintCfg(), zarf, args...) +func zarfExec(ctx context.Context, args ...string) (string, string, error) { + return exec.CmdWithContext(ctx, exec.PrintCfg(), zarf, args...) 
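The zarfExec helper above threads the caller's context straight into Zarf's exec.CmdWithContext wrapper, so cancelling the test context also cancels an in-flight CLI run. A minimal standalone sketch of the same idea using only the standard library; the binary name and arguments are placeholders, not part of the Zarf test suite:

    package main

    import (
        "context"
        "fmt"
        "os/exec"
        "time"
    )

    // runWithContext executes a CLI command and kills it when ctx is cancelled
    // or its deadline passes.
    func runWithContext(ctx context.Context, bin string, args ...string) (string, error) {
        out, err := exec.CommandContext(ctx, bin, args...).CombinedOutput()
        return string(out), err
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // Placeholder invocation; a test would point this at the binary under test.
        out, err := runWithContext(ctx, "echo", "hello")
        fmt.Println(out, err)
    }
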
} // getZarfVersion returns the current build/zarf version -func getZarfVersion(t *testing.T) string { +func getZarfVersion(ctx context.Context, t *testing.T) string { // Get the version of the CLI - stdOut, stdErr, err := zarfExec("version") + stdOut, stdErr, err := zarfExec(ctx, "version") require.NoError(t, err, stdOut, stdErr) return strings.Trim(stdOut, "\n") } From e7896cd5e15b953be763c89b87f35792ff99b76c Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 02:07:33 -0500 Subject: [PATCH 52/62] sigh --- src/extensions/bigbang/test/bigbang_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go index d7e4edc173..4e016c6a6f 100644 --- a/src/extensions/bigbang/test/bigbang_test.go +++ b/src/extensions/bigbang/test/bigbang_test.go @@ -121,7 +121,7 @@ func TestReleases(t *testing.T) { require.NoError(t, err, stdOut, stdErr) // Test connectivity to Twistlock - testConnection(context.TODO(), t) + testConnection(ctx, t) } func testConnection(ctx context.Context, t *testing.T) { From daa77ef7e305b4ce822547afe4cadb53dec33733 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 03:18:32 -0500 Subject: [PATCH 53/62] Fix config file test --- src/test/e2e/30_config_file_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/test/e2e/30_config_file_test.go b/src/test/e2e/30_config_file_test.go index bf9b5e5988..5095efa91f 100644 --- a/src/test/e2e/30_config_file_test.go +++ b/src/test/e2e/30_config_file_test.go @@ -6,6 +6,7 @@ package test import ( "fmt" + "os" "path/filepath" "testing" @@ -26,6 +27,7 @@ func TestConfigFile(t *testing.T) { // Test the config file environment variable t.Setenv("ZARF_CONFIG", filepath.Join(dir, config)) + defer os.Unsetenv("ZARF_CONFIG") configFileTests(t, dir, path) configFileDefaultTests(t) @@ -138,6 +140,7 @@ func configFileDefaultTests(t *testing.T) { // Test remaining default initializers t.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml")) + defer os.Unsetenv("ZARF_CONFIG") // Test global flags stdOut, _, _ := e2e.Zarf("--help") From df57e2f4df92ffb7809db999252eeff50d37b35f Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 16:10:08 -0500 Subject: [PATCH 54/62] Check container name before running state --- src/pkg/k8s/pods.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index d8c9c6d323..62fe5513f0 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -107,7 +107,7 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in for { select { case <-waitCtx.Done(): - k.Log("Pod lookup failed: %w", ctx.Err()) + k.Log("Pod lookup failed: %v", ctx.Err()) return nil case <-timer.C: pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ @@ -143,7 +143,7 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in // Check the status of initContainers for a running match for _, initContainer := range pod.Status.InitContainerStatuses { isRunning := initContainer.State.Running != nil - if isRunning && initContainer.Name == target.Container { + if initContainer.Name == target.Container && isRunning { // On running match in initContainer break this loop matchedContainer = true readyPods = append(readyPods, pod) @@ -157,7 +157,7 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in // Check the status of regular containers 
for a running match for _, container := range pod.Status.ContainerStatuses { isRunning := container.State.Running != nil - if isRunning && container.Name == target.Container { + if container.Name == target.Container && isRunning { matchedContainer = true readyPods = append(readyPods, pod) break From be801e24d421b9617b416f8b6dddb2e0241da464 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 16:25:52 -0500 Subject: [PATCH 55/62] Remove unnecessary matchedContainer boolean --- src/pkg/k8s/pods.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index 62fe5513f0..be9c72bec4 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -138,34 +138,25 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in // Handle container targeting if target.Container != "" { k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchedContainer bool // Check the status of initContainers for a running match for _, initContainer := range pod.Status.InitContainerStatuses { isRunning := initContainer.State.Running != nil if initContainer.Name == target.Container && isRunning { // On running match in initContainer break this loop - matchedContainer = true readyPods = append(readyPods, pod) break } } - if matchedContainer { - break - } // Check the status of regular containers for a running match for _, container := range pod.Status.ContainerStatuses { isRunning := container.State.Running != nil if container.Name == target.Container && isRunning { - matchedContainer = true readyPods = append(readyPods, pod) break } } - if matchedContainer { - break - } } else { status := pod.Status.Phase k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) @@ -179,7 +170,6 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in if len(readyPods) > 0 { return readyPods } - timer.Reset(3 * time.Second) } } From b25db88ad88f7619f3612f00ffe9f025d02e5b68 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 16:30:54 -0500 Subject: [PATCH 56/62] Use real context in unit test --- src/pkg/packager/common_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pkg/packager/common_test.go b/src/pkg/packager/common_test.go index 3ead7f9b4b..c0b970c6b6 100644 --- a/src/pkg/packager/common_test.go +++ b/src/pkg/packager/common_test.go @@ -133,7 +133,7 @@ func TestValidatePackageArchitecture(t *testing.T) { return true, nodeList, nil }) - err := p.validatePackageArchitecture(context.TODO()) + err := p.validatePackageArchitecture(context.Background()) require.Equal(t, testCase.expectedError, err) }) From 7099dd6f60c3c98b32394c766fd48688fc23cf48 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Thu, 9 May 2024 16:37:19 -0500 Subject: [PATCH 57/62] Pass ctx to NewClusterOrDie() --- src/cmd/common/utils.go | 6 +++--- src/cmd/connect.go | 2 +- src/cmd/destroy.go | 2 +- src/cmd/internal.go | 4 ++-- src/cmd/package.go | 2 +- src/cmd/tools/zarf.go | 4 ++-- src/test/e2e/22_git_and_gitops_test.go | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/cmd/common/utils.go b/src/cmd/common/utils.go index 9bf23d4a97..4fe6fa5043 100644 --- a/src/cmd/common/utils.go +++ b/src/cmd/common/utils.go @@ -39,10 +39,10 @@ func ExitOnInterrupt() { } // NewClusterOrDie creates a new Cluster instance and waits for the cluster to be ready or throws a fatal error. 
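The hunk below changes NewClusterOrDie to accept the command's context and derive its bounded wait from that parent instead of starting from context.Background(). A minimal sketch of the pattern, where waitForReady and defaultTimeout are stand-ins for cluster.NewClusterWithWait and cluster.DefaultTimeout rather than the real implementations:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    const defaultTimeout = 30 * time.Second

    // waitForReady stands in for cluster.NewClusterWithWait: it blocks until the
    // cluster answers or the context is done.
    func waitForReady(ctx context.Context) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(50 * time.Millisecond): // pretend the cluster came up
            return nil
        }
    }

    // connectOrDie derives a bounded child context from the caller's ctx, so an
    // outer cancellation (e.g. from the root command) still propagates.
    func connectOrDie(ctx context.Context) error {
        timeoutCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
        defer cancel()
        if err := waitForReady(timeoutCtx); err != nil {
            return fmt.Errorf("failed to connect to cluster: %w", err)
        }
        return nil
    }

    func main() {
        fmt.Println(connectOrDie(context.Background()))
    }
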
-func NewClusterOrDie() *cluster.Cluster { - ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) +func NewClusterOrDie(ctx context.Context) *cluster.Cluster { + timeoutCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) defer cancel() - c, err := cluster.NewClusterWithWait(ctx) + c, err := cluster.NewClusterWithWait(timeoutCtx) if err != nil { message.Fatalf(err, "Failed to connect to cluster") } diff --git a/src/cmd/connect.go b/src/cmd/connect.go index 8bfc347349..19422df967 100644 --- a/src/cmd/connect.go +++ b/src/cmd/connect.go @@ -94,7 +94,7 @@ var ( Short: lang.CmdConnectListShort, Run: func(cmd *cobra.Command, _ []string) { ctx := cmd.Context() - if err := common.NewClusterOrDie().PrintConnectTable(ctx); err != nil { + if err := common.NewClusterOrDie(ctx).PrintConnectTable(ctx); err != nil { message.Fatal(err, err.Error()) } }, diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go index 826ce792fc..429aec2c16 100644 --- a/src/cmd/destroy.go +++ b/src/cmd/destroy.go @@ -29,8 +29,8 @@ var destroyCmd = &cobra.Command{ Short: lang.CmdDestroyShort, Long: lang.CmdDestroyLong, Run: func(cmd *cobra.Command, _ []string) { - c := common.NewClusterOrDie() ctx := cmd.Context() + c := common.NewClusterOrDie(ctx) // NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig) // there will be no zarf-state to load and the struct will be empty. In these cases, if we can find diff --git a/src/cmd/internal.go b/src/cmd/internal.go index 40ae382f84..97263d0b08 100644 --- a/src/cmd/internal.go +++ b/src/cmd/internal.go @@ -197,7 +197,7 @@ var createReadOnlyGiteaUser = &cobra.Command{ ctx := cmd.Context() // Load the state so we can get the credentials for the admin git user - state, err := common.NewClusterOrDie().LoadZarfState(ctx) + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) } @@ -214,8 +214,8 @@ var createPackageRegistryToken = &cobra.Command{ Short: lang.CmdInternalArtifactRegistryGiteaTokenShort, Long: lang.CmdInternalArtifactRegistryGiteaTokenLong, Run: func(cmd *cobra.Command, _ []string) { - c := common.NewClusterOrDie() ctx := cmd.Context() + c := common.NewClusterOrDie(ctx) state, err := c.LoadZarfState(ctx) if err != nil { message.WarnErr(err, lang.ErrLoadState) diff --git a/src/cmd/package.go b/src/cmd/package.go index 07cda41159..e8817bf60b 100644 --- a/src/cmd/package.go +++ b/src/cmd/package.go @@ -133,7 +133,7 @@ var packageListCmd = &cobra.Command{ Short: lang.CmdPackageListShort, Run: func(cmd *cobra.Command, _ []string) { ctx := cmd.Context() - deployedZarfPackages, errs := common.NewClusterOrDie().GetDeployedZarfPackages(ctx) + deployedZarfPackages, errs := common.NewClusterOrDie(ctx).GetDeployedZarfPackages(ctx) if len(errs) > 0 && len(deployedZarfPackages) == 0 { message.Fatalf(errs, lang.CmdPackageListNoPackageWarn) } diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go index 130be78212..20655089bc 100644 --- a/src/cmd/tools/zarf.go +++ b/src/cmd/tools/zarf.go @@ -52,7 +52,7 @@ var getCredsCmd = &cobra.Command{ Args: cobra.MaximumNArgs(1), Run: func(cmd *cobra.Command, args []string) { ctx := cmd.Context() - state, err := common.NewClusterOrDie().LoadZarfState(ctx) + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) if err != nil || state.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -85,8 +85,8 @@ var updateCredsCmd = &cobra.Command{ } } - c 
:= common.NewClusterOrDie() ctx := cmd.Context() + c := common.NewClusterOrDie(ctx) oldState, err := c.LoadZarfState(ctx) if err != nil || oldState.Distro == "" { // If no distro the zarf secret did not load properly diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index 11a09755eb..7dc712e71a 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -70,7 +70,7 @@ func testGitServerConnect(t *testing.T, gitURL string) { func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := common.NewClusterOrDie().LoadZarfState(ctx) + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err) gitCfg := git.New(state.GitServer) @@ -93,7 +93,7 @@ func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { func testGitServerTagAndHash(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := common.NewClusterOrDie().LoadZarfState(ctx) + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err, "Failed to load Zarf state") repoName := "zarf-public-test-2469062884" From 10f65ae73aa949eebb3c1b462573a784ef5d6360 Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Mon, 13 May 2024 10:25:24 -0500 Subject: [PATCH 58/62] Apply PR feedback --- src/cmd/dev.go | 4 +--- src/cmd/tools/helm/repo_add.go | 8 ++++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/cmd/dev.go b/src/cmd/dev.go index 89b46508f5..17962176a4 100644 --- a/src/cmd/dev.go +++ b/src/cmd/dev.go @@ -53,9 +53,7 @@ var devDeployCmd = &cobra.Command{ pkgClient := packager.NewOrDie(&pkgConfig) defer pkgClient.ClearTempPaths() - ctx := cmd.Context() - - if err := pkgClient.DevDeploy(ctx); err != nil { + if err := pkgClient.DevDeploy(cmd.Context()); err != nil { message.Fatalf(err, lang.CmdDevDeployErr, err.Error()) } }, diff --git a/src/cmd/tools/helm/repo_add.go b/src/cmd/tools/helm/repo_add.go index 7854f56d50..114cd020f5 100644 --- a/src/cmd/tools/helm/repo_add.go +++ b/src/cmd/tools/helm/repo_add.go @@ -79,13 +79,13 @@ func newRepoAddCmd(out io.Writer) *cobra.Command { Use: "add [NAME] [URL]", Short: "add a chart repository", Args: require.ExactArgs(2), - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { o.name = args[0] o.url = args[1] o.repoFile = settings.RepositoryConfig o.repoCache = settings.RepositoryCache - return o.run(out) + return o.run(cmd.Context(), out) }, } @@ -105,7 +105,7 @@ func newRepoAddCmd(out io.Writer) *cobra.Command { return cmd } -func (o *repoAddOptions) run(out io.Writer) error { +func (o *repoAddOptions) run(ctx context.Context, out io.Writer) error { // Block deprecated repos if !o.allowDeprecatedRepos { for oldURL, newURL := range deprecatedRepos { @@ -130,7 +130,7 @@ func (o *repoAddOptions) run(out io.Writer) error { lockPath = o.repoFile + ".lock" } fileLock := flock.New(lockPath) - lockCtx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + lockCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) defer cancel() locked, err := fileLock.TryLockContext(lockCtx, time.Second) if err == nil && locked { From 07e621318aad497d7e1f5a35e63d4b6ef61ead3a Mon Sep 17 00:00:00 2001 From: Lucas Rodriguez Date: Mon, 13 May 2024 10:33:56 -0500 Subject: [PATCH 59/62] Go back to using context.TODO in tests --- src/extensions/bigbang/test/bigbang_test.go | 2 +- 
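The command changes in this series all follow the wiring introduced in patch 48's root.go: the root command seeds a context with cmd.SetContext in PersistentPreRun, and child commands read it back with cmd.Context() rather than creating their own. A compact sketch of that wiring with illustrative command names:

    package main

    import (
        "context"
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        rootCmd := &cobra.Command{
            Use: "app",
            PersistentPreRun: func(cmd *cobra.Command, _ []string) {
                // Seed one context for this command and everything it runs.
                cmd.SetContext(context.Background())
            },
        }

        deployCmd := &cobra.Command{
            Use: "deploy",
            RunE: func(cmd *cobra.Command, _ []string) error {
                ctx := cmd.Context() // inherited instead of context.Background()
                fmt.Println("deploying, ctx present:", ctx != nil)
                return nil
            },
        }

        rootCmd.AddCommand(deployCmd)
        rootCmd.SetArgs([]string{"deploy"})
        _ = rootCmd.Execute()
    }
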
 src/pkg/packager/common_test.go             | 2 +-
 src/test/e2e/21_connect_creds_test.go       | 2 +-
 src/test/e2e/22_git_and_gitops_test.go      | 2 +-
 src/test/e2e/23_data_injection_test.go      | 2 +-
 src/test/e2e/26_simple_packages_test.go     | 2 +-
 src/test/e2e/99_yolo_test.go                | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go
index 4e016c6a6f..70d0434bac 100644
--- a/src/extensions/bigbang/test/bigbang_test.go
+++ b/src/extensions/bigbang/test/bigbang_test.go
@@ -60,7 +60,7 @@ func TestReleases(t *testing.T) {
         zarfCache = fmt.Sprintf("--zarf-cache=%s", CIMount)
     }

-    ctx := context.Background()
+    ctx := context.TODO()

     // Initialize the cluster with the Git server and AMD64 architecture
     arch := "amd64"
diff --git a/src/pkg/packager/common_test.go b/src/pkg/packager/common_test.go
index c0b970c6b6..3ead7f9b4b 100644
--- a/src/pkg/packager/common_test.go
+++ b/src/pkg/packager/common_test.go
@@ -133,7 +133,7 @@ func TestValidatePackageArchitecture(t *testing.T) {
                 return true, nodeList, nil
             })

-            err := p.validatePackageArchitecture(context.Background())
+            err := p.validatePackageArchitecture(context.TODO())

             require.Equal(t, testCase.expectedError, err)
         })
diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go
index a66d390f18..815ebab64e 100644
--- a/src/test/e2e/21_connect_creds_test.go
+++ b/src/test/e2e/21_connect_creds_test.go
@@ -28,7 +28,7 @@ func TestConnectAndCreds(t *testing.T) {
     prevAgentSecretData, _, err := e2e.Kubectl("get", "secret", "agent-hook-tls", "-n", "zarf", "-o", "jsonpath={.data}")
     require.NoError(t, err)

-    ctx := context.Background()
+    ctx := context.TODO()

     connectToZarfServices(ctx, t)

diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go
index 7dc712e71a..bfbb6f52d5 100644
--- a/src/test/e2e/22_git_and_gitops_test.go
+++ b/src/test/e2e/22_git_and_gitops_test.go
@@ -37,7 +37,7 @@ func TestGit(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    ctx := context.Background()
+    ctx := context.TODO()
     tunnelGit, err := c.Connect(ctx, cluster.ZarfGit)
     require.NoError(t, err)
     defer tunnelGit.Close()
diff --git a/src/test/e2e/23_data_injection_test.go b/src/test/e2e/23_data_injection_test.go
index efbce9bc13..75eb84bbf6 100644
--- a/src/test/e2e/23_data_injection_test.go
+++ b/src/test/e2e/23_data_injection_test.go
@@ -21,7 +21,7 @@ func TestDataInjection(t *testing.T) {
     t.Log("E2E: Data injection")
     e2e.SetupWithCluster(t)

-    ctx := context.Background()
+    ctx := context.TODO()

     path := fmt.Sprintf("build/zarf-package-kiwix-%s-3.5.0.tar", e2e.Arch)

diff --git a/src/test/e2e/26_simple_packages_test.go b/src/test/e2e/26_simple_packages_test.go
index c7a96e80d6..25fa7115e9 100644
--- a/src/test/e2e/26_simple_packages_test.go
+++ b/src/test/e2e/26_simple_packages_test.go
@@ -27,7 +27,7 @@ func TestDosGames(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    tunnel, err := c.Connect(context.Background(), "doom")
+    tunnel, err := c.Connect(context.TODO(), "doom")
     require.NoError(t, err)
     defer tunnel.Close()

diff --git a/src/test/e2e/99_yolo_test.go b/src/test/e2e/99_yolo_test.go
index bce9ab1450..1c843501a3 100644
--- a/src/test/e2e/99_yolo_test.go
+++ b/src/test/e2e/99_yolo_test.go
@@ -36,7 +36,7 @@ func TestYOLOMode(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    tunnel, err := c.Connect(context.Background(), "doom")
+    tunnel, err := c.Connect(context.TODO(), "doom")
     require.NoError(t, err)
     defer tunnel.Close()

From 0f48bce673074fb3422b97f8555b4bc6545d9b62 Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Mon, 13 May 2024 10:35:36 -0500
Subject: [PATCH 60/62] Missed one

---
 src/test/e2e/21_connect_creds_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go
index 815ebab64e..f742e9e80a 100644
--- a/src/test/e2e/21_connect_creds_test.go
+++ b/src/test/e2e/21_connect_creds_test.go
@@ -71,7 +71,7 @@ func TestMetrics(t *testing.T) {
     tunnel, err := c.NewTunnel("zarf", "svc", "agent-hook", "", 8888, 8443)
     require.NoError(t, err)

-    _, err = tunnel.Connect(context.Background())
+    _, err = tunnel.Connect(context.TODO())
     require.NoError(t, err)
     defer tunnel.Close()

From e9a44b728d3f3341004e0b2966c7e7038eeff908 Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Tue, 14 May 2024 10:34:57 -0500
Subject: [PATCH 61/62] Back to using context.Background() in tests

---
 src/test/e2e/21_connect_creds_test.go   | 4 ++--
 src/test/e2e/22_git_and_gitops_test.go  | 2 +-
 src/test/e2e/23_data_injection_test.go  | 2 +-
 src/test/e2e/26_simple_packages_test.go | 2 +-
 src/test/e2e/99_yolo_test.go            | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go
index f742e9e80a..a66d390f18 100644
--- a/src/test/e2e/21_connect_creds_test.go
+++ b/src/test/e2e/21_connect_creds_test.go
@@ -28,7 +28,7 @@ func TestConnectAndCreds(t *testing.T) {
     prevAgentSecretData, _, err := e2e.Kubectl("get", "secret", "agent-hook-tls", "-n", "zarf", "-o", "jsonpath={.data}")
     require.NoError(t, err)

-    ctx := context.TODO()
+    ctx := context.Background()

     connectToZarfServices(ctx, t)

@@ -71,7 +71,7 @@ func TestMetrics(t *testing.T) {
     tunnel, err := c.NewTunnel("zarf", "svc", "agent-hook", "", 8888, 8443)
     require.NoError(t, err)

-    _, err = tunnel.Connect(context.TODO())
+    _, err = tunnel.Connect(context.Background())
     require.NoError(t, err)
     defer tunnel.Close()

diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go
index bfbb6f52d5..7dc712e71a 100644
--- a/src/test/e2e/22_git_and_gitops_test.go
+++ b/src/test/e2e/22_git_and_gitops_test.go
@@ -37,7 +37,7 @@ func TestGit(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    ctx := context.TODO()
+    ctx := context.Background()
     tunnelGit, err := c.Connect(ctx, cluster.ZarfGit)
     require.NoError(t, err)
     defer tunnelGit.Close()
diff --git a/src/test/e2e/23_data_injection_test.go b/src/test/e2e/23_data_injection_test.go
index 75eb84bbf6..efbce9bc13 100644
--- a/src/test/e2e/23_data_injection_test.go
+++ b/src/test/e2e/23_data_injection_test.go
@@ -21,7 +21,7 @@ func TestDataInjection(t *testing.T) {
     t.Log("E2E: Data injection")
     e2e.SetupWithCluster(t)

-    ctx := context.TODO()
+    ctx := context.Background()

     path := fmt.Sprintf("build/zarf-package-kiwix-%s-3.5.0.tar", e2e.Arch)

diff --git a/src/test/e2e/26_simple_packages_test.go b/src/test/e2e/26_simple_packages_test.go
index 25fa7115e9..c7a96e80d6 100644
--- a/src/test/e2e/26_simple_packages_test.go
+++ b/src/test/e2e/26_simple_packages_test.go
@@ -27,7 +27,7 @@ func TestDosGames(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    tunnel, err := c.Connect(context.TODO(), "doom")
+    tunnel, err := c.Connect(context.Background(), "doom")
     require.NoError(t, err)
     defer tunnel.Close()

diff --git a/src/test/e2e/99_yolo_test.go b/src/test/e2e/99_yolo_test.go
index 1c843501a3..bce9ab1450 100644
--- a/src/test/e2e/99_yolo_test.go
+++ b/src/test/e2e/99_yolo_test.go
@@ -36,7 +36,7 @@ func TestYOLOMode(t *testing.T) {
     c, err := cluster.NewCluster()
     require.NoError(t, err)

-    tunnel, err := c.Connect(context.TODO(), "doom")
+    tunnel, err := c.Connect(context.Background(), "doom")
     require.NoError(t, err)
     defer tunnel.Close()

From 5ee026588ecb8d7f95199c8e3a0c1a9ddfed49eb Mon Sep 17 00:00:00 2001
From: Lucas Rodriguez
Date: Tue, 14 May 2024 13:26:58 -0500
Subject: [PATCH 62/62] Missed a TODO

---
 src/extensions/bigbang/test/bigbang_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go
index 70d0434bac..4e016c6a6f 100644
--- a/src/extensions/bigbang/test/bigbang_test.go
+++ b/src/extensions/bigbang/test/bigbang_test.go
@@ -60,7 +60,7 @@ func TestReleases(t *testing.T) {
         zarfCache = fmt.Sprintf("--zarf-cache=%s", CIMount)
     }

-    ctx := context.TODO()
+    ctx := context.Background()

     // Initialize the cluster with the Git server and AMD64 architecture
     arch := "amd64"
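
The series above converges on a single pattern: build one cancellable context at the CLI entry point, hand it to cobra with ExecuteContext, and read it back inside each command through cmd.Context() instead of minting context.Background() timeouts ad hoc. Below is a minimal, self-contained sketch of that pattern; it is illustrative only and not taken from the Zarf tree, and the `list` subcommand and doList helper are hypothetical stand-ins.

// Illustrative sketch of the context-propagation pattern; not Zarf source.
package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "syscall"

    "github.com/spf13/cobra"
)

// doList is a hypothetical stand-in for work that should honor cancellation.
func doList(ctx context.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
        fmt.Println("listing...")
        return nil
    }
}

func main() {
    root := &cobra.Command{Use: "example"}

    root.AddCommand(&cobra.Command{
        Use: "list",
        RunE: func(cmd *cobra.Command, _ []string) error {
            // cmd.Context() is the context handed to ExecuteContext below.
            return doList(cmd.Context())
        },
    })

    // One cancellable context for the whole invocation, cancelled on SIGINT/SIGTERM.
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    if err := root.ExecuteContext(ctx); err != nil {
        os.Exit(1)
    }
}

Timeouts then become the callee's concern: a command that needs one can derive it with context.WithTimeout(cmd.Context(), ...) rather than starting from context.Background(), which mirrors the changes to NewClusterOrDie and repo_add.go above.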