diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5e96cba653..38ae99b9bd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,6 +9,7 @@ repos:
         args:
           - "--allow-missing-credentials"
       - id: detect-private-key
+        exclude: "src/test/e2e/30_config_file_test.go"
       - id: end-of-file-fixer
         exclude: site/src/content/docs/commands/.*
       - id: fix-byte-order-marker
diff --git a/src/cmd/common/utils.go b/src/cmd/common/utils.go
index 54113ee90f..4fe6fa5043 100644
--- a/src/cmd/common/utils.go
+++ b/src/cmd/common/utils.go
@@ -5,11 +5,13 @@ package common
 
 import (
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
 
 	"github.com/defenseunicorns/zarf/src/config/lang"
+	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 )
 
@@ -35,3 +37,14 @@ func ExitOnInterrupt() {
 		}
 	}()
 }
+
+// NewClusterOrDie creates a new Cluster instance and waits for the cluster to be ready, exiting with a fatal error on failure.
+func NewClusterOrDie(ctx context.Context) *cluster.Cluster {
+	timeoutCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout)
+	defer cancel()
+	c, err := cluster.NewClusterWithWait(timeoutCtx)
+	if err != nil {
+		message.Fatalf(err, "Failed to connect to cluster")
+	}
+	return c
+}
diff --git a/src/cmd/connect.go b/src/cmd/connect.go
index 39b382cad3..19422df967 100644
--- a/src/cmd/connect.go
+++ b/src/cmd/connect.go
@@ -32,7 +32,7 @@ var (
 		Aliases: []string{"c"},
 		Short:   lang.CmdConnectShort,
 		Long:    lang.CmdConnectLong,
-		Run: func(_ *cobra.Command, args []string) {
+		Run: func(cmd *cobra.Command, args []string) {
 			var target string
 			if len(args) > 0 {
 				target = args[0]
@@ -43,12 +43,14 @@ var (
 				spinner.Fatalf(err, lang.CmdConnectErrCluster, err.Error())
 			}
 
+			ctx := cmd.Context()
+
 			var tunnel *k8s.Tunnel
 			if connectResourceName != "" {
 				zt := cluster.NewTunnelInfo(connectNamespace, connectResourceType, connectResourceName, "", connectLocalPort, connectRemotePort)
-				tunnel, err = c.ConnectTunnelInfo(zt)
+				tunnel, err = c.ConnectTunnelInfo(ctx, zt)
 			} else {
-				tunnel, err = c.Connect(target)
+				tunnel, err = c.Connect(ctx, target)
 			}
 			if err != nil {
 				spinner.Fatalf(err, lang.CmdConnectErrService, err.Error())
@@ -90,8 +92,11 @@ var (
 		Use:     "list",
 		Aliases: []string{"l"},
 		Short:   lang.CmdConnectListShort,
-		Run: func(_ *cobra.Command, _ []string) {
-			cluster.NewClusterOrDie().PrintConnectTable()
+		Run: func(cmd *cobra.Command, _ []string) {
+			ctx := cmd.Context()
+			if err := common.NewClusterOrDie(ctx).PrintConnectTable(ctx); err != nil {
+				message.Fatal(err, err.Error())
+			}
 		},
 	}
 )
diff --git a/src/cmd/destroy.go b/src/cmd/destroy.go
index bc9dbe7d61..429aec2c16 100644
--- a/src/cmd/destroy.go
+++ b/src/cmd/destroy.go
@@ -10,10 +10,10 @@ import (
 	"regexp"
 
 	"github.com/defenseunicorns/pkg/helpers"
+	"github.com/defenseunicorns/zarf/src/cmd/common"
 	"github.com/defenseunicorns/zarf/src/config"
 	"github.com/defenseunicorns/zarf/src/config/lang"
 	"github.com/defenseunicorns/zarf/src/internal/packager/helm"
-	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/pkg/utils/exec"
 
@@ -28,16 +28,14 @@ var destroyCmd = &cobra.Command{
 	Aliases: []string{"d"},
 	Short:   lang.CmdDestroyShort,
 	Long:    lang.CmdDestroyLong,
-	Run: func(_ *cobra.Command, _ []string) {
-		c, err := cluster.NewClusterWithWait(cluster.DefaultTimeout)
-		if err != nil {
-			message.Fatalf(err, lang.ErrNoClusterConnection)
-		}
+	Run: func(cmd *cobra.Command, _ []string) {
+		ctx := cmd.Context()
+		c := common.NewClusterOrDie(ctx)
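The core pattern this PR adopts, in isolation: a context is seeded once for the command tree and every `Run`/`RunE` retrieves it with `cmd.Context()`, deriving local timeouts from it (as the new `NewClusterOrDie` above does). A minimal, self-contained sketch under that assumption — the command names here are illustrative, not Zarf's:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	root.AddCommand(&cobra.Command{
		Use: "connect",
		Run: func(cmd *cobra.Command, _ []string) {
			// The context seeded below is visible in every child command.
			ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second)
			defer cancel()
			_, hasDeadline := ctx.Deadline()
			fmt.Println("deadline set:", hasDeadline) // deadline set: true
		},
	})
	// ExecuteContext seeds the context for the whole command tree.
	if err := root.ExecuteContext(context.Background()); err != nil {
		fmt.Println(err)
	}
}
```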
		// NOTE: If 'zarf init' failed to deploy the k3s component (or if we're looking at the wrong kubeconfig)
		//       there will be no zarf-state to load and the struct will be empty. In these cases, if we can find
		//       the scripts to remove k3s, we will still try to remove a locally installed k3s cluster
-		state, err := c.LoadZarfState()
+		state, err := c.LoadZarfState(ctx)
 		if err != nil {
 			message.WarnErr(err, lang.ErrLoadState)
 		}
@@ -74,10 +72,12 @@ var destroyCmd = &cobra.Command{
 			helm.Destroy(removeComponents)
 
 			// If Zarf didn't deploy the cluster, only delete the ZarfNamespace
-			c.DeleteZarfNamespace()
+			if err := c.DeleteZarfNamespace(ctx); err != nil {
+				message.Fatal(err, err.Error())
+			}
 
 			// Remove zarf agent labels and secrets from namespaces Zarf doesn't manage
-			c.StripZarfLabelsAndSecretsFromNamespaces()
+			c.StripZarfLabelsAndSecretsFromNamespaces(ctx)
 		}
 	},
 }
diff --git a/src/cmd/dev.go b/src/cmd/dev.go
index 068565f7c7..17962176a4 100644
--- a/src/cmd/dev.go
+++ b/src/cmd/dev.go
@@ -40,7 +40,7 @@ var devDeployCmd = &cobra.Command{
 	Args:  cobra.MaximumNArgs(1),
 	Short: lang.CmdDevDeployShort,
 	Long:  lang.CmdDevDeployLong,
-	Run: func(_ *cobra.Command, args []string) {
+	Run: func(cmd *cobra.Command, args []string) {
 		pkgConfig.CreateOpts.BaseDir = common.SetBaseDirectory(args)
 
 		v := common.GetViper()
@@ -50,12 +50,10 @@ var devDeployCmd = &cobra.Command{
 		pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper)
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Create the package
-		if err := pkgClient.DevDeploy(); err != nil {
+		if err := pkgClient.DevDeploy(cmd.Context()); err != nil {
 			message.Fatalf(err, lang.CmdDevDeployErr, err.Error())
 		}
 	},
@@ -209,19 +207,15 @@ var devFindImagesCmd = &cobra.Command{
 	Run: func(_ *cobra.Command, args []string) {
 		pkgConfig.CreateOpts.BaseDir = common.SetBaseDirectory(args)
 
-		// Ensure uppercase keys from viper
 		v := common.GetViper()
 		pkgConfig.CreateOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgCreateSet), pkgConfig.CreateOpts.SetVariables, strings.ToUpper)
 
 		pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper)
-
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Find all the images the package might need
 		if _, err := pkgClient.FindImages(); err != nil {
 			message.Fatalf(err, lang.CmdDevFindImagesErr, err.Error())
 		}
@@ -292,8 +286,14 @@ func init() {
 	// use the package create config for this and reset it here to avoid overwriting the config.CreateOptions.SetVariables
 	devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.CreateOpts.SetVariables, "set", v.GetStringMapString(common.VPkgCreateSet), lang.CmdDevFlagSet)
 
-	devFindImagesCmd.Flags().MarkDeprecated("set", "this field is replaced by create-set")
-	devFindImagesCmd.Flags().MarkHidden("set")
+	err := devFindImagesCmd.Flags().MarkDeprecated("set", "this field is replaced by create-set")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
+	err = devFindImagesCmd.Flags().MarkHidden("set")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
 	devFindImagesCmd.Flags().StringVarP(&pkgConfig.CreateOpts.Flavor, "flavor", "f", v.GetString(common.VPkgCreateFlavor), lang.CmdPackageCreateFlagFlavor)
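Why the previously ignored `MarkDeprecated`/`MarkHidden` return values are now checked: these pflag helpers only fail when the named flag was never registered, so the checks turn a silently misspelled flag name into a hard failure. A small stand-alone illustration (flag names invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.String("set", "", "deprecated variable setter")

	fmt.Println(fs.MarkDeprecated("set", "this field is replaced by create-set")) // <nil>
	fmt.Println(fs.MarkHidden("set"))                                             // <nil>
	fmt.Println(fs.MarkHidden("s3t"))                                             // error: flag was never defined
}
```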
 	devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.CreateOpts.SetVariables, "create-set", v.GetStringMapString(common.VPkgCreateSet), lang.CmdDevFlagSet)
 	devFindImagesCmd.Flags().StringToStringVar(&pkgConfig.PkgOpts.SetVariables, "deploy-set", v.GetStringMapString(common.VPkgDeploySet), lang.CmdPackageDeployFlagSet)
@@ -341,7 +341,16 @@ func bindDevGenerateFlags(_ *viper.Viper) {
 	generateFlags.StringVar(&pkgConfig.GenerateOpts.Output, "output-directory", "", "Output directory for the generated zarf.yaml")
 	generateFlags.StringVar(&pkgConfig.FindImagesOpts.KubeVersionOverride, "kube-version", "", lang.CmdDevFlagKubeVersion)
 
-	devGenerateCmd.MarkFlagRequired("url")
-	devGenerateCmd.MarkFlagRequired("version")
-	devGenerateCmd.MarkFlagRequired("output-directory")
+	err := devGenerateCmd.MarkFlagRequired("url")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
+	err = devGenerateCmd.MarkFlagRequired("version")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
+	err = devGenerateCmd.MarkFlagRequired("output-directory")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
 }
diff --git a/src/cmd/initialize.go b/src/cmd/initialize.go
index 0f8a1ee322..b41c3ca8d9 100644
--- a/src/cmd/initialize.go
+++ b/src/cmd/initialize.go
@@ -35,7 +35,7 @@ var initCmd = &cobra.Command{
 	Short:   lang.CmdInitShort,
 	Long:    lang.CmdInitLong,
 	Example: lang.CmdInitExample,
-	Run: func(_ *cobra.Command, _ []string) {
+	Run: func(cmd *cobra.Command, _ []string) {
 		zarfLogo := message.GetLogo()
 		_, _ = fmt.Fprintln(os.Stderr, zarfLogo)
 
@@ -58,17 +58,16 @@ var initCmd = &cobra.Command{
 			message.Fatal(err, err.Error())
 		}
 
-		// Ensure uppercase keys from viper
 		v := common.GetViper()
 		pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper)
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src))
 		defer pkgClient.ClearTempPaths()
 
-		// Deploy everything
-		err = pkgClient.Deploy()
+		ctx := cmd.Context()
+
+		err = pkgClient.Deploy(ctx)
 		if err != nil {
 			message.Fatal(err, err.Error())
 		}
diff --git a/src/cmd/internal.go b/src/cmd/internal.go
index 55b7f39232..97263d0b08 100644
--- a/src/cmd/internal.go
+++ b/src/cmd/internal.go
@@ -16,7 +16,6 @@ import (
 	"github.com/defenseunicorns/zarf/src/config/lang"
 	"github.com/defenseunicorns/zarf/src/internal/agent"
 	"github.com/defenseunicorns/zarf/src/internal/packager/git"
-	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/types"
 	"github.com/invopop/jsonschema"
@@ -194,15 +193,17 @@ var createReadOnlyGiteaUser = &cobra.Command{
 	Use:   "create-read-only-gitea-user",
 	Short: lang.CmdInternalCreateReadOnlyGiteaUserShort,
 	Long:  lang.CmdInternalCreateReadOnlyGiteaUserLong,
-	Run: func(_ *cobra.Command, _ []string) {
+	Run: func(cmd *cobra.Command, _ []string) {
+		ctx := cmd.Context()
+
 		// Load the state so we can get the credentials for the admin git user
-		state, err := cluster.NewClusterOrDie().LoadZarfState()
+		state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx)
 		if err != nil {
 			message.WarnErr(err, lang.ErrLoadState)
 		}
 
 		// Create the non-admin user
-		if err = git.New(state.GitServer).CreateReadOnlyUser(); err != nil {
+		if err = git.New(state.GitServer).CreateReadOnlyUser(ctx); err != nil {
 			message.WarnErr(err, lang.CmdInternalCreateReadOnlyGiteaUserErr)
 		}
 	},
@@ -212,24 +213,26 @@ var createPackageRegistryToken = &cobra.Command{
 	Use:   "create-artifact-registry-token",
 	Short: lang.CmdInternalArtifactRegistryGiteaTokenShort,
 	Long:  lang.CmdInternalArtifactRegistryGiteaTokenLong,
-	Run: func(_ *cobra.Command, _ []string) {
-		// Load the state so we can get the credentials for the admin git user
-		c := cluster.NewClusterOrDie()
-		state, err := c.LoadZarfState()
+	Run: func(cmd *cobra.Command, _ []string) {
+		ctx := cmd.Context()
+		c := common.NewClusterOrDie(ctx)
+		state, err := c.LoadZarfState(ctx)
 		if err != nil {
 			message.WarnErr(err, lang.ErrLoadState)
 		}
 
 		// If we are set up to use an internal artifact server, create the artifact registry token
 		if state.ArtifactServer.InternalServer {
-			token, err := git.New(state.GitServer).CreatePackageRegistryToken()
+			token, err := git.New(state.GitServer).CreatePackageRegistryToken(ctx)
 			if err != nil {
 				message.WarnErr(err, lang.CmdInternalArtifactRegistryGiteaTokenErr)
 			}
 
 			state.ArtifactServer.PushToken = token.Sha1
 
-			c.SaveZarfState(state)
+			if err := c.SaveZarfState(ctx, state); err != nil {
+				message.Fatal(err, err.Error())
+			}
 		}
 	},
 }
@@ -238,10 +241,11 @@ var updateGiteaPVC = &cobra.Command{
 	Use:   "update-gitea-pvc",
 	Short: lang.CmdInternalUpdateGiteaPVCShort,
 	Long:  lang.CmdInternalUpdateGiteaPVCLong,
-	Run: func(_ *cobra.Command, _ []string) {
+	Run: func(cmd *cobra.Command, _ []string) {
+		ctx := cmd.Context()
 
 		// There is a possibility that the pvc does not yet exist and Gitea helm chart should create it
-		helmShouldCreate, err := git.UpdateGiteaPVC(rollback)
+		helmShouldCreate, err := git.UpdateGiteaPVC(ctx, rollback)
 		if err != nil {
 			message.WarnErr(err, lang.CmdInternalUpdateGiteaPVCErr)
 		}
@@ -294,6 +298,9 @@ func addHiddenDummyFlag(cmd *cobra.Command, flagDummy string) {
 	if cmd.PersistentFlags().Lookup(flagDummy) == nil {
 		var dummyStr string
 		cmd.PersistentFlags().StringVar(&dummyStr, flagDummy, "", "")
-		cmd.PersistentFlags().MarkHidden(flagDummy)
+		err := cmd.PersistentFlags().MarkHidden(flagDummy)
+		if err != nil {
+			message.Fatal(err, err.Error())
+		}
 	}
 }
diff --git a/src/cmd/package.go b/src/cmd/package.go
index a0f124a864..e8817bf60b 100644
--- a/src/cmd/package.go
+++ b/src/cmd/package.go
@@ -48,16 +48,13 @@ var packageCreateCmd = &cobra.Command{
 			config.CommonOptions.CachePath = config.ZarfDefaultCachePath
 		}
 
-		// Ensure uppercase keys from viper
 		v := common.GetViper()
 		pkgConfig.CreateOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgCreateSet), pkgConfig.CreateOpts.SetVariables, strings.ToUpper)
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Create the package
 		if err := pkgClient.Create(); err != nil {
 			message.Fatalf(err, lang.CmdPackageCreateErr, err.Error())
 		}
@@ -70,22 +67,19 @@ var packageDeployCmd = &cobra.Command{
 	Short:   lang.CmdPackageDeployShort,
 	Long:    lang.CmdPackageDeployLong,
 	Args:    cobra.MaximumNArgs(1),
-	Run: func(_ *cobra.Command, args []string) {
+	Run: func(cmd *cobra.Command, args []string) {
 		pkgConfig.PkgOpts.PackageSource = choosePackage(args)
 
-		// Ensure uppercase keys from viper and CLI --set
 		v := common.GetViper()
-
-		// Merge the viper config file variables and provided CLI flag variables (CLI takes precedence))
 		pkgConfig.PkgOpts.SetVariables = helpers.TransformAndMergeMap(
 			v.GetStringMapString(common.VPkgDeploySet), pkgConfig.PkgOpts.SetVariables, strings.ToUpper)
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Deploy the package
-		if err := pkgClient.Deploy(); err != nil {
+		ctx := cmd.Context()
+
+		if err := pkgClient.Deploy(ctx); err != nil {
 			message.Fatalf(err, lang.CmdPackageDeployErr, err.Error())
 		}
 	},
@@ -98,15 +92,15 @@ var packageMirrorCmd = &cobra.Command{
 	Long:    lang.CmdPackageMirrorLong,
 	Example: lang.CmdPackageMirrorExample,
 	Args:    cobra.MaximumNArgs(1),
-	Run: func(_ *cobra.Command, args []string) {
+	Run: func(cmd *cobra.Command, args []string) {
 		pkgConfig.PkgOpts.PackageSource = choosePackage(args)
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Deploy the package
-		if err := pkgClient.Mirror(); err != nil {
+		ctx := cmd.Context()
+
+		if err := pkgClient.Mirror(ctx); err != nil {
 			message.Fatalf(err, lang.CmdPackageDeployErr, err.Error())
 		}
 	},
@@ -123,11 +117,9 @@ var packageInspectCmd = &cobra.Command{
 
 		src := identifyAndFallbackToClusterSource()
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src))
 		defer pkgClient.ClearTempPaths()
 
-		// Inspect the package
 		if err := pkgClient.Inspect(); err != nil {
 			message.Fatalf(err, lang.CmdPackageInspectErr, err.Error())
 		}
@@ -139,9 +131,9 @@ var packageListCmd = &cobra.Command{
 	Use:     "list",
 	Aliases: []string{"l", "ls"},
 	Short:   lang.CmdPackageListShort,
-	Run: func(_ *cobra.Command, _ []string) {
-		// Get all the deployed packages
-		deployedZarfPackages, errs := cluster.NewClusterOrDie().GetDeployedZarfPackages()
+	Run: func(cmd *cobra.Command, _ []string) {
+		ctx := cmd.Context()
+		deployedZarfPackages, errs := common.NewClusterOrDie(ctx).GetDeployedZarfPackages(ctx)
 		if len(errs) > 0 && len(deployedZarfPackages) == 0 {
 			message.Fatalf(errs, lang.CmdPackageListNoPackageWarn)
 		}
@@ -161,7 +153,6 @@ var packageListCmd = &cobra.Command{
 			})
 		}
 
-		// Print out the table for the user
 		header := []string{"Package", "Version", "Components"}
 		message.Table(header, packageData)
 
@@ -177,15 +168,17 @@ var packageRemoveCmd = &cobra.Command{
 	Aliases: []string{"u", "rm"},
 	Args:    cobra.MaximumNArgs(1),
 	Short:   lang.CmdPackageRemoveShort,
-	Run: func(_ *cobra.Command, args []string) {
+	Run: func(cmd *cobra.Command, args []string) {
 		pkgConfig.PkgOpts.PackageSource = choosePackage(args)
 		src := identifyAndFallbackToClusterSource()
-		// Configure the packager
+
 		pkgClient := packager.NewOrDie(&pkgConfig, packager.WithSource(src))
 		defer pkgClient.ClearTempPaths()
 
-		if err := pkgClient.Remove(); err != nil {
+		ctx := cmd.Context()
+
+		if err := pkgClient.Remove(ctx); err != nil {
 			message.Fatalf(err, lang.CmdPackageRemoveErr, err.Error())
 		}
 	},
@@ -220,11 +213,9 @@ var packagePublishCmd = &cobra.Command{
 
 		pkgConfig.PublishOpts.PackageDestination = ref.String()
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Publish the package
 		if err := pkgClient.Publish(); err != nil {
 			message.Fatalf(err, lang.CmdPackagePublishErr, err.Error())
 		}
@@ -239,11 +230,9 @@ var packagePullCmd = &cobra.Command{
 	Run: func(_ *cobra.Command, args []string) {
 		pkgConfig.PkgOpts.PackageSource = args[0]
 
-		// Configure the packager
 		pkgClient := packager.NewOrDie(&pkgConfig)
 		defer pkgClient.ClearTempPaths()
 
-		// Pull the package
 		if err := pkgClient.Pull(); err != nil {
 			message.Fatalf(err, lang.CmdPackagePullErr, err.Error())
 		}
@@ -288,7 +277,7 @@ func identifyAndFallbackToClusterSource() (src sources.PackageSource) {
 	return src
 }
 
-func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+func getPackageCompletionArgs(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	var pkgCandidates []string
 
 	c, err := cluster.NewCluster()
@@ -296,8 +285,9 @@ func getPackageCompletionArgs(_ *cobra.Command, _ []string, _ string) ([]string,
 		return pkgCandidates, cobra.ShellCompDirectiveDefault
 	}
 
-	// Get all the deployed packages
-	deployedZarfPackages, _ := c.GetDeployedZarfPackages()
+	ctx := cmd.Context()
+
+	deployedZarfPackages, _ := c.GetDeployedZarfPackages(ctx)
 	// Populate list of package names
 	for _, pkg := range deployedZarfPackages {
 		pkgCandidates = append(pkgCandidates, pkg.Name)
@@ -366,9 +356,18 @@ func bindCreateFlags(v *viper.Viper) {
 
 	createFlags.IntVar(&pkgConfig.PkgOpts.Retries, "retries", v.GetInt(common.VPkgRetries), lang.CmdPackageFlagRetries)
 
-	createFlags.MarkHidden("output-directory")
-	createFlags.MarkHidden("key")
-	createFlags.MarkHidden("key-pass")
+	err := createFlags.MarkHidden("output-directory")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
+	err = createFlags.MarkHidden("key")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
+	err = createFlags.MarkHidden("key-pass")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
 }
 
 func bindDeployFlags(v *viper.Viper) {
@@ -388,7 +387,10 @@ func bindDeployFlags(v *viper.Viper) {
 	deployFlags.StringVar(&pkgConfig.PkgOpts.Shasum, "shasum", v.GetString(common.VPkgDeployShasum), lang.CmdPackageDeployFlagShasum)
 	deployFlags.StringVar(&pkgConfig.PkgOpts.SGetKeyPath, "sget", v.GetString(common.VPkgDeploySget), lang.CmdPackageDeployFlagSget)
 
-	deployFlags.MarkHidden("sget")
+	err := deployFlags.MarkHidden("sget")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
 }
 
 func bindMirrorFlags(v *viper.Viper) {
diff --git a/src/cmd/root.go b/src/cmd/root.go
index 9563f382d3..83fb28e61d 100644
--- a/src/cmd/root.go
+++ b/src/cmd/root.go
@@ -5,6 +5,7 @@ package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"strings"
@@ -37,6 +38,10 @@ var rootCmd = &cobra.Command{
 			config.SkipLogFile = true
 		}
 
+		// Set the global context for the root command and all child commands
+		ctx := context.Background()
+		cmd.SetContext(ctx)
+
 		common.SetupCLI()
 	},
 	Short: lang.RootCmdShort,
diff --git a/src/cmd/tools/archiver.go b/src/cmd/tools/archiver.go
index 9a1ee1c022..344cc7398c 100644
--- a/src/cmd/tools/archiver.go
+++ b/src/cmd/tools/archiver.go
@@ -93,5 +93,8 @@ func init() {
 	archiverDecompressCmd.Flags().BoolVar(&unarchiveAll, "decompress-all", false, "Decompress all tarballs in the archive")
 	archiverDecompressCmd.Flags().BoolVar(&unarchiveAll, "unarchive-all", false, "Unarchive all tarballs in the archive")
 	archiverDecompressCmd.MarkFlagsMutuallyExclusive("decompress-all", "unarchive-all")
-	archiverDecompressCmd.Flags().MarkHidden("decompress-all")
+	err := archiverDecompressCmd.Flags().MarkHidden("decompress-all")
+	if err != nil {
+		message.Fatal(err, err.Error())
+	}
 }
diff --git a/src/cmd/tools/crane.go b/src/cmd/tools/crane.go
index 792e4b555c..b501aeac7a 100644
--- a/src/cmd/tools/crane.go
+++ b/src/cmd/tools/crane.go
@@ -124,13 +124,14 @@ func zarfCraneCatalog(cranePlatformOptions []crane.Option) *cobra.Command {
 			return err
 		}
 
-		// Load Zarf state
-		zarfState, err := c.LoadZarfState()
+		ctx := cmd.Context()
+
+		zarfState, err := c.LoadZarfState(ctx)
 		if err != nil {
 			return err
 		}
 
-		registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo)
+		registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo)
 		if err != nil {
 			return err
 		}
@@ -173,8 +174,9 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command
 
 			message.Note(lang.CmdToolsRegistryZarfState)
 
-			// Load the state (if able)
-			zarfState, err := c.LoadZarfState()
+			ctx := cmd.Context()
+
+			zarfState, err := c.LoadZarfState(ctx)
 			if err != nil {
 				message.Warnf(lang.CmdToolsCraneConnectedButBadStateErr, err.Error())
 				return originalListFn(cmd, args)
@@ -185,7 +187,7 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command
 				return originalListFn(cmd, args)
 			}
 
-			_, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo)
+			_, tunnel, err := c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo)
 			if err != nil {
 				return err
 			}
@@ -211,27 +213,27 @@ func zarfCraneInternalWrapper(commandToWrap func(*[]crane.Option) *cobra.Command
 	return wrappedCommand
 }
 
-func pruneImages(_ *cobra.Command, _ []string) error {
+func pruneImages(cmd *cobra.Command, _ []string) error {
 	// Try to connect to a Zarf initialized cluster
 	c, err := cluster.NewCluster()
 	if err != nil {
 		return err
 	}
 
-	// Load the state
-	zarfState, err := c.LoadZarfState()
+	ctx := cmd.Context()
+
+	zarfState, err := c.LoadZarfState(ctx)
 	if err != nil {
 		return err
 	}
 
-	// Load the currently deployed packages
-	zarfPackages, errs := c.GetDeployedZarfPackages()
+	zarfPackages, errs := c.GetDeployedZarfPackages(ctx)
 	if len(errs) > 0 {
 		return lang.ErrUnableToGetPackages
 	}
 
 	// Set up a tunnel to the registry if applicable
-	registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(zarfState.RegistryInfo)
+	registryEndpoint, tunnel, err := c.ConnectToZarfRegistryEndpoint(ctx, zarfState.RegistryInfo)
 	if err != nil {
 		return err
 	}
diff --git a/src/cmd/tools/helm/load_plugins.go b/src/cmd/tools/helm/load_plugins.go
index 28ea155030..f4d2800137 100644
--- a/src/cmd/tools/helm/load_plugins.go
+++ b/src/cmd/tools/helm/load_plugins.go
@@ -32,6 +32,7 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"sigs.k8s.io/yaml"
@@ -216,7 +217,10 @@ func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) {
 	if err != nil {
 		// The file could be missing or invalid. No static completion for this plugin.
 		if settings.Debug {
-			log.Output(2, fmt.Sprintf("[info] %s\n", err.Error()))
+			err := log.Output(2, fmt.Sprintf("[info] %s\n", err.Error()))
+			if err != nil {
+				message.Fatal(err, err.Error())
+			}
 		}
 		// Continue to set up dynamic completion.
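Several hunks in this area replace fire-and-forget cleanup (an ignored `log.Output`, and, in the repo_add.go hunk below, a bare `defer fileLock.Unlock()`) with checked variants. The generic checked-defer shape, as a runnable sketch with a temp file standing in for the lock:

```go
package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "demo-*.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Checked defer: surface cleanup failures instead of discarding them.
	defer func() {
		if err := f.Close(); err != nil {
			log.Fatal(err)
		}
	}()

	if _, err := f.WriteString("hello"); err != nil {
		log.Fatal(err)
	}
}
```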
 		cmds = &pluginCommand{}
@@ -238,7 +242,10 @@ func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *plug
 	if len(cmds.Name) == 0 {
 		// Missing name for a command
 		if settings.Debug {
-			log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath()))
+			err := log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath()))
+			if err != nil {
+				message.Fatal(err, err.Error())
+			}
 		}
 		return
 	}
diff --git a/src/cmd/tools/helm/repo_add.go b/src/cmd/tools/helm/repo_add.go
index d053689000..114cd020f5 100644
--- a/src/cmd/tools/helm/repo_add.go
+++ b/src/cmd/tools/helm/repo_add.go
@@ -31,6 +31,8 @@ import (
 	"time"
 
 	"github.com/defenseunicorns/pkg/helpers"
+	"github.com/defenseunicorns/zarf/src/pkg/cluster"
+	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/gofrs/flock"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
@@ -77,13 +79,13 @@ func newRepoAddCmd(out io.Writer) *cobra.Command {
 		Use:   "add [NAME] [URL]",
 		Short: "add a chart repository",
 		Args:  require.ExactArgs(2),
-		RunE: func(_ *cobra.Command, args []string) error {
+		RunE: func(cmd *cobra.Command, args []string) error {
 			o.name = args[0]
 			o.url = args[1]
 			o.repoFile = settings.RepositoryConfig
 			o.repoCache = settings.RepositoryCache
 
-			return o.run(out)
+			return o.run(cmd.Context(), out)
 		},
 	}
 
@@ -103,7 +105,7 @@ func newRepoAddCmd(out io.Writer) *cobra.Command {
 	return cmd
 }
 
-func (o *repoAddOptions) run(out io.Writer) error {
+func (o *repoAddOptions) run(ctx context.Context, out io.Writer) error {
 	// Block deprecated repos
 	if !o.allowDeprecatedRepos {
 		for oldURL, newURL := range deprecatedRepos {
@@ -128,11 +130,15 @@ func (o *repoAddOptions) run(out io.Writer) error {
 		lockPath = o.repoFile + ".lock"
 	}
 	fileLock := flock.New(lockPath)
-	lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	lockCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout)
 	defer cancel()
 	locked, err := fileLock.TryLockContext(lockCtx, time.Second)
 	if err == nil && locked {
-		defer fileLock.Unlock()
+		defer func() {
+			if err := fileLock.Unlock(); err != nil {
+				message.Fatal(err, err.Error())
+			}
+		}()
 	}
 	if err != nil {
 		return err
diff --git a/src/cmd/tools/helm/repo_index.go b/src/cmd/tools/helm/repo_index.go
index a84a3af74c..1d6182e85e 100644
--- a/src/cmd/tools/helm/repo_index.go
+++ b/src/cmd/tools/helm/repo_index.go
@@ -27,6 +27,7 @@ import (
 	"path/filepath"
 
 	"github.com/defenseunicorns/pkg/helpers"
+	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 
@@ -101,7 +102,10 @@ func index(dir, url, mergeTo string) error {
 		var i2 *repo.IndexFile
 		if _, err := os.Stat(mergeTo); os.IsNotExist(err) {
 			i2 = repo.NewIndexFile()
-			i2.WriteFile(mergeTo, helpers.ReadAllWriteUser)
+			err := i2.WriteFile(mergeTo, helpers.ReadAllWriteUser)
+			if err != nil {
+				message.Fatal(err, err.Error())
+			}
 		} else {
 			i2, err = repo.LoadIndexFile(mergeTo)
 			if err != nil {
diff --git a/src/cmd/tools/zarf.go b/src/cmd/tools/zarf.go
index 363fe87c63..20655089bc 100644
--- a/src/cmd/tools/zarf.go
+++ b/src/cmd/tools/zarf.go
@@ -19,7 +19,6 @@ import (
 	"github.com/defenseunicorns/zarf/src/internal/packager/git"
 	"github.com/defenseunicorns/zarf/src/internal/packager/helm"
 	"github.com/defenseunicorns/zarf/src/internal/packager/template"
-	"github.com/defenseunicorns/zarf/src/pkg/cluster"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/pkg/packager/sources"
"github.com/defenseunicorns/zarf/src/pkg/pki" @@ -51,8 +50,9 @@ var getCredsCmd = &cobra.Command{ Example: lang.CmdToolsGetCredsExample, Aliases: []string{"gc"}, Args: cobra.MaximumNArgs(1), - Run: func(_ *cobra.Command, args []string) { - state, err := cluster.NewClusterOrDie().LoadZarfState() + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) if err != nil || state.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -85,8 +85,9 @@ var updateCredsCmd = &cobra.Command{ } } - c := cluster.NewClusterOrDie() - oldState, err := c.LoadZarfState() + ctx := cmd.Context() + c := common.NewClusterOrDie(ctx) + oldState, err := c.LoadZarfState(ctx) if err != nil || oldState.Distro == "" { // If no distro the zarf secret did not load properly message.Fatalf(nil, lang.ErrLoadState) @@ -114,16 +115,16 @@ var updateCredsCmd = &cobra.Command{ if confirm { // Update registry and git pull secrets if slices.Contains(args, message.RegistryKey) { - c.UpdateZarfManagedImageSecrets(newState) + c.UpdateZarfManagedImageSecrets(ctx, newState) } if slices.Contains(args, message.GitKey) { - c.UpdateZarfManagedGitSecrets(newState) + c.UpdateZarfManagedGitSecrets(ctx, newState) } // Update artifact token (if internal) if slices.Contains(args, message.ArtifactKey) && newState.ArtifactServer.PushToken == "" && newState.ArtifactServer.InternalServer { g := git.New(oldState.GitServer) - tokenResponse, err := g.CreatePackageRegistryToken() + tokenResponse, err := g.CreatePackageRegistryToken(ctx) if err != nil { // Warn if we couldn't actually update the git server (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableCreateToken, err.Error()) @@ -133,7 +134,7 @@ var updateCredsCmd = &cobra.Command{ } // Save the final Zarf State - err = c.SaveZarfState(newState) + err = c.SaveZarfState(ctx, newState) if err != nil { message.Fatalf(err, lang.ErrSaveState) } @@ -150,14 +151,14 @@ var updateCredsCmd = &cobra.Command{ } if slices.Contains(args, message.GitKey) && newState.GitServer.InternalServer { g := git.New(newState.GitServer) - err = g.UpdateZarfGiteaUsers(oldState) + err = g.UpdateZarfGiteaUsers(ctx, oldState) if err != nil { // Warn if we couldn't actually update the git server (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableUpdateGit, err.Error()) } } if slices.Contains(args, message.AgentKey) { - err = h.UpdateZarfAgentValues() + err = h.UpdateZarfAgentValues(ctx) if err != nil { // Warn if we couldn't actually update the agent (it might not be installed and we should try to continue) message.Warnf(lang.CmdToolsUpdateCredsUnableUpdateAgent, err.Error()) diff --git a/src/extensions/bigbang/test/bigbang_test.go b/src/extensions/bigbang/test/bigbang_test.go index 64f0f95e23..4e016c6a6f 100644 --- a/src/extensions/bigbang/test/bigbang_test.go +++ b/src/extensions/bigbang/test/bigbang_test.go @@ -60,45 +60,47 @@ func TestReleases(t *testing.T) { zarfCache = fmt.Sprintf("--zarf-cache=%s", CIMount) } + ctx := context.Background() + // Initialize the cluster with the Git server and AMD64 architecture arch := "amd64" - stdOut, stdErr, err := zarfExec("init", "--components", "git-server", "--architecture", arch, tmpdir, "--confirm", zarfCache) + stdOut, stdErr, err := zarfExec(ctx, "init", "--components", "git-server", "--architecture", arch, tmpdir, "--confirm", zarfCache) 
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Remove the init package to free up disk space on the test runner
-	err = os.RemoveAll(fmt.Sprintf("zarf-init-%s-%s.tar.zst", arch, getZarfVersion(t)))
+	err = os.RemoveAll(fmt.Sprintf("zarf-init-%s-%s.tar.zst", arch, getZarfVersion(ctx, t)))
 	require.NoError(t, err)
 
 	// Build the previous version
 	bbVersion := fmt.Sprintf("--set=BB_VERSION=%s", previous)
 	bbMajor := fmt.Sprintf("--set=BB_MAJOR=%s", previous[0:1])
-	stdOut, stdErr, err = zarfExec("package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, tmpdir, "--confirm")
+	stdOut, stdErr, err = zarfExec(ctx, "package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, tmpdir, "--confirm")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Clean up zarf cache to reduce disk pressure
-	stdOut, stdErr, err = zarfExec("tools", "clear-cache")
+	stdOut, stdErr, err = zarfExec(ctx, "tools", "clear-cache")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Deploy the previous version
 	pkgPath := fmt.Sprintf("zarf-package-big-bang-test-%s-%s.tar.zst", arch, previous)
-	stdOut, stdErr, err = zarfExec("package", "deploy", pkgPath, tmpdir, "--confirm")
+	stdOut, stdErr, err = zarfExec(ctx, "package", "deploy", pkgPath, tmpdir, "--confirm")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// HACK: scale down the flux deployments due to very-low CPU in the test runner
 	fluxControllers := []string{"helm-controller", "source-controller", "kustomize-controller", "notification-controller"}
 	for _, deployment := range fluxControllers {
-		stdOut, stdErr, err = zarfExec("tools", "kubectl", "-n", "flux-system", "scale", "deployment", deployment, "--replicas=0")
+		stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "-n", "flux-system", "scale", "deployment", deployment, "--replicas=0")
 		require.NoError(t, err, stdOut, stdErr)
 	}
 
 	// Cluster info
-	stdOut, stdErr, err = zarfExec("tools", "kubectl", "describe", "nodes")
+	stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "describe", "nodes")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Build the latest version
 	bbVersion = fmt.Sprintf("--set=BB_VERSION=%s", latest)
 	bbMajor = fmt.Sprintf("--set=BB_MAJOR=%s", latest[0:1])
-	stdOut, stdErr, err = zarfExec("package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, "--differential", pkgPath, tmpdir, "--confirm")
+	stdOut, stdErr, err = zarfExec(ctx, "package", "create", "../src/extensions/bigbang/test/package", bbVersion, bbMajor, "--differential", pkgPath, tmpdir, "--confirm")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Remove the previous version package
@@ -106,23 +108,23 @@ func TestReleases(t *testing.T) {
 	require.NoError(t, err)
 
 	// Clean up zarf cache to reduce disk pressure
-	stdOut, stdErr, err = zarfExec("tools", "clear-cache")
+	stdOut, stdErr, err = zarfExec(ctx, "tools", "clear-cache")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Deploy the latest version
 	pkgPath = fmt.Sprintf("zarf-package-big-bang-test-%s-%s-differential-%s.tar.zst", arch, previous, latest)
-	stdOut, stdErr, err = zarfExec("package", "deploy", pkgPath, tmpdir, "--confirm")
+	stdOut, stdErr, err = zarfExec(ctx, "package", "deploy", pkgPath, tmpdir, "--confirm")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Cluster info
-	stdOut, stdErr, err = zarfExec("tools", "kubectl", "describe", "nodes")
+	stdOut, stdErr, err = zarfExec(ctx, "tools", "kubectl", "describe", "nodes")
 	require.NoError(t, err, stdOut, stdErr)
 
 	// Test connectivity to Twistlock
-	testConnection(t)
+	testConnection(ctx, t)
 }
 
-func testConnection(t *testing.T) {
+func testConnection(ctx context.Context, t *testing.T) {
 	// Establish the tunnel config
 	c, err := cluster.NewCluster()
 	require.NoError(t, err)
@@ -130,7 +132,7 @@ func testConnection(t *testing.T) {
 	require.NoError(t, err)
 
 	// Establish the tunnel connection
-	_, err = tunnel.Connect()
+	_, err = tunnel.Connect(ctx)
 	require.NoError(t, err)
 	defer tunnel.Close()
 
@@ -140,14 +142,14 @@ func testConnection(t *testing.T) {
 	require.Equal(t, 200, resp.StatusCode)
 }
 
-func zarfExec(args ...string) (string, string, error) {
-	return exec.CmdWithContext(context.TODO(), exec.PrintCfg(), zarf, args...)
+func zarfExec(ctx context.Context, args ...string) (string, string, error) {
+	return exec.CmdWithContext(ctx, exec.PrintCfg(), zarf, args...)
 }
 
 // getZarfVersion returns the current build/zarf version
-func getZarfVersion(t *testing.T) string {
+func getZarfVersion(ctx context.Context, t *testing.T) string {
 	// Get the version of the CLI
-	stdOut, stdErr, err := zarfExec("version")
+	stdOut, stdErr, err := zarfExec(ctx, "version")
 	require.NoError(t, err, stdOut, stdErr)
 	return strings.Trim(stdOut, "\n")
 }
diff --git a/src/internal/packager/git/gitea.go b/src/internal/packager/git/gitea.go
index f7bd86b00c..d243337d3c 100644
--- a/src/internal/packager/git/gitea.go
+++ b/src/internal/packager/git/gitea.go
@@ -6,6 +6,7 @@ package git
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -31,7 +32,7 @@ type CreateTokenResponse struct {
 }
 
 // CreateReadOnlyUser uses the Gitea API to create a non-admin Zarf user.
-func (g *Git) CreateReadOnlyUser() error {
+func (g *Git) CreateReadOnlyUser(ctx context.Context) error {
 	message.Debugf("git.CreateReadOnlyUser()")
 
 	c, err := cluster.NewCluster()
@@ -44,7 +45,7 @@ func (g *Git) CreateReadOnlyUser() error {
 	if err != nil {
 		return err
 	}
-	_, err = tunnel.Connect()
+	_, err = tunnel.Connect(ctx)
 	if err != nil {
 		return err
 	}
@@ -102,16 +103,16 @@ func (g *Git) CreateReadOnlyUser() error {
 }
 
 // UpdateZarfGiteaUsers updates Zarf gitea users
-func (g *Git) UpdateZarfGiteaUsers(oldState *types.ZarfState) error {
+func (g *Git) UpdateZarfGiteaUsers(ctx context.Context, oldState *types.ZarfState) error {
 
 	// Update git read-only user password
-	err := g.UpdateGitUser(oldState.GitServer.PushPassword, g.Server.PullUsername, g.Server.PullPassword)
+	err := g.UpdateGitUser(ctx, oldState.GitServer.PushPassword, g.Server.PullUsername, g.Server.PullPassword)
 	if err != nil {
 		return fmt.Errorf("unable to update gitea read only user password: %w", err)
 	}
 
 	// Update Git admin password
-	err = g.UpdateGitUser(oldState.GitServer.PushPassword, g.Server.PushUsername, g.Server.PushPassword)
+	err = g.UpdateGitUser(ctx, oldState.GitServer.PushPassword, g.Server.PushUsername, g.Server.PushPassword)
 	if err != nil {
 		return fmt.Errorf("unable to update gitea admin user password: %w", err)
 	}
@@ -119,7 +120,7 @@ func (g *Git) UpdateZarfGiteaUsers(oldState *types.ZarfState) error {
 }
 
 // UpdateGitUser updates Zarf git server users
-func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass string) error {
+func (g *Git) UpdateGitUser(ctx context.Context, oldAdminPass string, username string, userpass string) error {
 	message.Debugf("git.UpdateGitUser()")
 
 	c, err := cluster.NewCluster()
@@ -131,7 +132,7 @@ func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass strin
 	if err != nil {
 		return err
 	}
-	_, err = tunnel.Connect()
+	_, err = tunnel.Connect(ctx)
 	if err != nil {
 		return err
 	}
@@ -157,7 +158,7 @@ func (g *Git) UpdateGitUser(oldAdminPass string, username string, userpass strin
 }
 
 // CreatePackageRegistryToken uses the Gitea API to create a package registry token.
-func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) {
+func (g *Git) CreatePackageRegistryToken(ctx context.Context) (CreateTokenResponse, error) {
 	message.Debugf("git.CreatePackageRegistryToken()")
 
 	c, err := cluster.NewCluster()
@@ -170,7 +171,7 @@ func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) {
 	if err != nil {
 		return CreateTokenResponse{}, err
 	}
-	_, err = tunnel.Connect()
+	_, err = tunnel.Connect(ctx)
 	if err != nil {
 		return CreateTokenResponse{}, err
 	}
@@ -245,7 +246,7 @@ func (g *Git) CreatePackageRegistryToken() (CreateTokenResponse, error) {
 }
 
 // UpdateGiteaPVC updates the existing Gitea persistent volume claim and tells Gitea whether to create or not.
-func UpdateGiteaPVC(shouldRollBack bool) (string, error) {
+func UpdateGiteaPVC(ctx context.Context, shouldRollBack bool) (string, error) {
 	c, err := cluster.NewCluster()
 	if err != nil {
 		return "false", err
@@ -260,12 +261,12 @@ func UpdateGiteaPVC(shouldRollBack bool) (string, error) {
 	annotations := map[string]string{"meta.helm.sh/release-name": "zarf-gitea", "meta.helm.sh/release-namespace": "zarf"}
 
 	if shouldRollBack {
-		err = c.K8s.RemoveLabelsAndAnnotations(cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations)
+		err = c.K8s.RemoveLabelsAndAnnotations(ctx, cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations)
 		return "false", err
 	}
 
 	if pvcName == "data-zarf-gitea-0" {
-		err = c.K8s.AddLabelsAndAnnotations(cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations)
+		err = c.K8s.AddLabelsAndAnnotations(ctx, cluster.ZarfNamespaceName, pvcName, groupKind, labels, annotations)
 		return "true", err
 	}
diff --git a/src/internal/packager/helm/post-render.go b/src/internal/packager/helm/post-render.go
index 7cee486d99..8b99cd14f0 100644
--- a/src/internal/packager/helm/post-render.go
+++ b/src/internal/packager/helm/post-render.go
@@ -6,6 +6,7 @@ package helm
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -80,13 +81,14 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
 
 	finalManifestsOutput := bytes.NewBuffer(nil)
 
-	// Otherwise, loop over the resources,
 	if r.cluster != nil {
-		if err := r.editHelmResources(resources, finalManifestsOutput); err != nil {
+		ctx := context.Background()
+
+		if err := r.editHelmResources(ctx, resources, finalManifestsOutput); err != nil {
 			return nil, err
 		}
 
-		if err := r.adoptAndUpdateNamespaces(); err != nil {
+		if err := r.adoptAndUpdateNamespaces(ctx); err != nil {
 			return nil, err
 		}
 	} else {
@@ -99,9 +101,9 @@ func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
 	return finalManifestsOutput, nil
 }
 
-func (r *renderer) adoptAndUpdateNamespaces() error {
+func (r *renderer) adoptAndUpdateNamespaces(ctx context.Context) error {
 	c := r.cluster
-	existingNamespaces, _ := c.GetNamespaces()
+	existingNamespaces, _ := c.GetNamespaces(ctx)
 
 	for name, namespace := range r.namespaces {
 		// Check to see if this namespace already exists
@@ -114,7 +116,7 @@ func (r *renderer) adoptAndUpdateNamespaces(ctx context.Context) error {
 
 		if !existingNamespace {
 			// This is a new namespace, add it
-			if _, err := c.CreateNamespace(namespace); err != nil {
+			if _, err := c.CreateNamespace(ctx, namespace); err != nil {
 				return fmt.Errorf("unable to create the missing namespace %s", name)
 			}
 		} else if r.cfg.DeployOpts.AdoptExistingResources {
@@ -123,7 +125,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error {
 				message.Warnf("Refusing to adopt the initial namespace: %s", name)
 			} else {
 				// This is an existing namespace to adopt
-				if _, err := c.UpdateNamespace(namespace); err != nil {
+				if _, err := c.UpdateNamespace(ctx, namespace); err != nil {
 					return fmt.Errorf("unable to adopt the existing namespace %s", name)
 				}
 			}
@@ -138,10 +140,10 @@ func (r *renderer) adoptAndUpdateNamespaces() error {
 		validRegistrySecret := c.GenerateRegistryPullCreds(name, config.ZarfImagePullSecretName, r.state.RegistryInfo)
 
 		// Try to get a valid existing secret
-		currentRegistrySecret, _ := c.GetSecret(name, config.ZarfImagePullSecretName)
+		currentRegistrySecret, _ := c.GetSecret(ctx, name, config.ZarfImagePullSecretName)
 		if currentRegistrySecret.Name != config.ZarfImagePullSecretName || !reflect.DeepEqual(currentRegistrySecret.Data, validRegistrySecret.Data) {
 			// Create or update the zarf registry secret
-			if _, err := c.CreateOrUpdateSecret(validRegistrySecret); err != nil {
+			if _, err := c.CreateOrUpdateSecret(ctx, validRegistrySecret); err != nil {
 				message.WarnErrf(err, "Problem creating registry secret for the %s namespace", name)
 			}
 
@@ -149,7 +151,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error {
 			gitServerSecret := c.GenerateGitPullCreds(name, config.ZarfGitServerSecretName, r.state.GitServer)
 
 			// Create or update the zarf git server secret
-			if _, err := c.CreateOrUpdateSecret(gitServerSecret); err != nil {
+			if _, err := c.CreateOrUpdateSecret(ctx, gitServerSecret); err != nil {
 				message.WarnErrf(err, "Problem creating git server secret for the %s namespace", name)
 			}
 		}
@@ -157,7 +159,7 @@ func (r *renderer) adoptAndUpdateNamespaces() error {
 	return nil
 }
 
-func (r *renderer) editHelmResources(resources []releaseutil.Manifest, finalManifestsOutput *bytes.Buffer) error {
+func (r *renderer) editHelmResources(ctx context.Context, resources []releaseutil.Manifest, finalManifestsOutput *bytes.Buffer) error {
 	for _, resource := range resources {
 		// parse to unstructured to have access to more data than just the name
 		rawData := &unstructured.Unstructured{}
@@ -223,7 +225,7 @@ func (r *renderer) editHelmResources(resources []releaseutil.Manifest, finalMani
 				"meta.helm.sh/release-namespace": r.chart.Namespace,
 			}
 
-			if err := r.cluster.AddLabelsAndAnnotations(deployedNamespace, rawData.GetName(), rawData.GroupVersionKind().GroupKind(), helmLabels, helmAnnotations); err != nil {
+			if err := r.cluster.AddLabelsAndAnnotations(ctx, deployedNamespace, rawData.GetName(), rawData.GroupVersionKind().GroupKind(), helmLabels, helmAnnotations); err != nil {
 				// Print a debug message since this could just be because the resource doesn't exist
 				message.Debugf("Unable to adopt resource %s: %s", rawData.GetName(), err.Error())
 			}
diff --git a/src/internal/packager/helm/zarf.go b/src/internal/packager/helm/zarf.go
index c6db1299eb..94996b4173 100644
--- a/src/internal/packager/helm/zarf.go
+++ b/src/internal/packager/helm/zarf.go
@@ -5,6 +5,7 @@ package helm
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/defenseunicorns/zarf/src/internal/packager/template"
@@ -50,7 +51,7 @@ func (h *Helm) UpdateZarfRegistryValues() error {
 }
 
 // UpdateZarfAgentValues updates the Zarf agent deployment with the new state values
-func (h *Helm) UpdateZarfAgentValues() error {
+func (h *Helm) UpdateZarfAgentValues(ctx context.Context) error {
 	spinner := message.NewProgressSpinner("Gathering information to update Zarf Agent TLS")
 	defer spinner.Stop()
 
@@ -60,10 +61,14 @@ func (h *Helm) UpdateZarfAgentValues() error {
 	}
 
 	// Get the current agent image from one of its pods.
-	pods := h.cluster.WaitForPodsAndContainers(k8s.PodLookup{
-		Namespace: cluster.ZarfNamespaceName,
-		Selector:  "app=agent-hook",
-	}, nil)
+	pods := h.cluster.WaitForPodsAndContainers(
+		ctx,
+		k8s.PodLookup{
+			Namespace: cluster.ZarfNamespaceName,
+			Selector:  "app=agent-hook",
+		},
+		nil,
+	)
 
 	var currentAgentImage transform.Image
 	if len(pods) > 0 && len(pods[0].Spec.Containers) > 0 {
@@ -119,10 +124,13 @@ func (h *Helm) UpdateZarfAgentValues() error {
 	defer spinner.Stop()
 
 	// Force pods to be recreated to get the updated secret.
-	err = h.cluster.DeletePods(k8s.PodLookup{
-		Namespace: cluster.ZarfNamespaceName,
-		Selector:  "app=agent-hook",
-	})
+	err = h.cluster.DeletePods(
+		ctx,
+		k8s.PodLookup{
+			Namespace: cluster.ZarfNamespaceName,
+			Selector:  "app=agent-hook",
+		},
+	)
 	if err != nil {
 		return fmt.Errorf("error recycling pods for the Zarf Agent: %w", err)
 	}
diff --git a/src/internal/packager/images/push.go b/src/internal/packager/images/push.go
index 64e592a5ab..e9a3645335 100644
--- a/src/internal/packager/images/push.go
+++ b/src/internal/packager/images/push.go
@@ -5,6 +5,7 @@ package images
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -20,7 +21,7 @@ import (
 )
 
 // Push pushes images to a registry.
-func Push(cfg PushConfig) error {
+func Push(ctx context.Context, cfg PushConfig) error {
 	logs.Warn.SetOutput(&message.DebugWriter{})
 	logs.Progress.SetOutput(&message.DebugWriter{})
 
@@ -57,7 +58,7 @@ func Push(cfg PushConfig) error {
 	if err := helpers.Retry(func() error {
 		c, _ := cluster.NewCluster()
 		if c != nil {
-			registryURL, tunnel, err = c.ConnectToZarfRegistryEndpoint(cfg.RegInfo)
+			registryURL, tunnel, err = c.ConnectToZarfRegistryEndpoint(ctx, cfg.RegInfo)
 			if err != nil {
 				return err
 			}
diff --git a/src/pkg/cluster/common.go b/src/pkg/cluster/common.go
index a84a06fee3..8b2153999b 100644
--- a/src/pkg/cluster/common.go
+++ b/src/pkg/cluster/common.go
@@ -5,6 +5,7 @@ package cluster
 
 import (
+	"context"
 	"time"
 
 	"github.com/defenseunicorns/zarf/src/config"
@@ -27,19 +28,9 @@ var labels = k8s.Labels{
 	config.ZarfManagedByLabel: "zarf",
 }
 
-// NewClusterOrDie creates a new Cluster instance and waits up to 30 seconds for the cluster to be ready or throws a fatal error.
-func NewClusterOrDie() *Cluster {
-	c, err := NewClusterWithWait(DefaultTimeout)
-	if err != nil {
-		message.Fatalf(err, "Failed to connect to cluster")
-	}
-
-	return c
-}
-
-// NewClusterWithWait creates a new Cluster instance and waits for the given timeout for the cluster to be ready.
+// NewClusterWithWait creates a new Cluster instance and waits for the cluster to be ready.
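With `NewClusterWithWait` losing its `timeout` parameter, bounding the wait becomes the caller's job via the context. The timer-plus-select retry shape (the same one `getImagesAndNodesForInjection` adopts near the end of this diff), sketched generically:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitFor polls check until it succeeds or ctx expires. A zero-duration
// timer fires immediately for the first attempt; Reset re-arms it
// between retries.
func waitFor(ctx context.Context, check func() error) error {
	timer := time.NewTimer(0)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("wait timed out: %w", ctx.Err())
		case <-timer.C:
			if err := check(); err == nil {
				return nil
			}
			timer.Reset(2 * time.Second)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	err := waitFor(ctx, func() error { return errors.New("not ready") })
	fmt.Println(err) // wait timed out: context deadline exceeded
}
```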
-func NewClusterWithWait(timeout time.Duration) (*Cluster, error) {
-	spinner := message.NewProgressSpinner("Waiting for cluster connection (%s timeout)", timeout.String())
+func NewClusterWithWait(ctx context.Context) (*Cluster, error) {
+	spinner := message.NewProgressSpinner("Waiting for cluster connection")
 	defer spinner.Stop()
 
 	c := &Cluster{}
@@ -50,7 +41,7 @@ func NewClusterWithWait(timeout time.Duration) (*Cluster, error) {
 		return nil, err
 	}
 
-	err = c.WaitForHealthyCluster(timeout)
+	err = c.WaitForHealthyCluster(ctx)
 	if err != nil {
 		return nil, err
 	}
diff --git a/src/pkg/cluster/data.go b/src/pkg/cluster/data.go
index d2724c0d1f..0c5e526536 100644
--- a/src/pkg/cluster/data.go
+++ b/src/pkg/cluster/data.go
@@ -5,6 +5,7 @@ package cluster
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -25,7 +26,7 @@ import (
 
 // HandleDataInjection waits for the target pod(s) to come up and injects the data into them
 // todo: this currently requires kubectl but we should have enough k8s work to make this native now.
-func (c *Cluster) HandleDataInjection(wg *sync.WaitGroup, data types.ZarfDataInjection, componentPath *layout.ComponentPaths, dataIdx int) {
+func (c *Cluster) HandleDataInjection(ctx context.Context, wg *sync.WaitGroup, data types.ZarfDataInjection, componentPath *layout.ComponentPaths, dataIdx int) {
 	defer wg.Done()
 
 	injectionCompletionMarker := filepath.Join(componentPath.DataInjections, config.GetDataInjectionMarker())
@@ -74,7 +75,7 @@ iterator:
 		}
 
 		// Wait until the pod we are injecting data into becomes available
-		pods := c.WaitForPodsAndContainers(target, podFilterByInitContainer)
+		pods := c.WaitForPodsAndContainers(ctx, target, podFilterByInitContainer)
 		if len(pods) < 1 {
 			continue
 		}
@@ -139,7 +140,7 @@ iterator:
 		// Block one final time to make sure at least one pod has come up and injected the data
 		// Using only the pod as the final selector because we don't know what the container name will be
 		// Still using the init container filter to make sure we have the right running pod
-		_ = c.WaitForPodsAndContainers(podOnlyTarget, podFilterByInitContainer)
+		_ = c.WaitForPodsAndContainers(ctx, podOnlyTarget, podFilterByInitContainer)
 
 		// Cleanup now to reduce disk pressure
 		_ = os.RemoveAll(source)
diff --git a/src/pkg/cluster/injector.go b/src/pkg/cluster/injector.go
index 716a260b96..dd2a918f98 100644
--- a/src/pkg/cluster/injector.go
+++ b/src/pkg/cluster/injector.go
@@ -5,6 +5,7 @@ package cluster
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"os"
@@ -41,7 +42,7 @@ var (
 type imageNodeMap map[string][]string
 
 // StartInjectionMadness initializes a Zarf injection into the cluster.
-func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injectorSeedSrcs []string) {
+func (c *Cluster) StartInjectionMadness(ctx context.Context, tmpDir string, imagesDir string, injectorSeedSrcs []string) {
 	spinner := message.NewProgressSpinner("Attempting to bootstrap the seed image into the cluster")
 	defer spinner.Stop()
 
@@ -64,19 +65,20 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 	var seedImages []transform.Image
 
 	// Get all the images from the cluster
-	timeout := 5 * time.Minute
-	spinner.Updatef("Getting the list of existing cluster images (%s timeout)", timeout.String())
-	if images, err = c.getImagesAndNodesForInjection(timeout); err != nil {
+	spinner.Updatef("Getting the list of existing cluster images")
+	findImagesCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+	if images, err = c.getImagesAndNodesForInjection(findImagesCtx); err != nil {
 		spinner.Fatalf(err, "Unable to generate a list of candidate images to perform the registry injection")
 	}
 
 	spinner.Updatef("Creating the injector configmap")
-	if err = c.createInjectorConfigmap(tmp.InjectionBinary); err != nil {
+	if err = c.createInjectorConfigmap(ctx, tmp.InjectionBinary); err != nil {
 		spinner.Fatalf(err, "Unable to create the injector configmap")
 	}
 
 	spinner.Updatef("Creating the injector service")
-	if service, err := c.createService(); err != nil {
+	if service, err := c.createService(ctx); err != nil {
 		spinner.Fatalf(err, "Unable to create the injector service")
 	} else {
 		config.ZarfSeedPort = fmt.Sprintf("%d", service.Spec.Ports[0].NodePort)
@@ -88,7 +90,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 	}
 
 	spinner.Updatef("Loading the seed registry configmaps")
-	if payloadConfigmaps, sha256sum, err = c.createPayloadConfigmaps(tmp.SeedImagesDir, tmp.InjectorPayloadTarGz, spinner); err != nil {
+	if payloadConfigmaps, sha256sum, err = c.createPayloadConfigmaps(ctx, tmp.SeedImagesDir, tmp.InjectorPayloadTarGz, spinner); err != nil {
 		spinner.Fatalf(err, "Unable to generate the injector payload configmaps")
 	}
 
@@ -105,7 +107,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 		spinner.Updatef("Attempting to bootstrap with the %s/%s", node, image)
 
 		// Make sure the pod is not there first
-		_ = c.DeletePod(ZarfNamespaceName, "injector")
+		_ = c.DeletePod(ctx, ZarfNamespaceName, "injector")
 
 		// Update the podspec image path and use the first node found
 		pod, err := c.buildInjectionPod(node[0], image, payloadConfigmaps, sha256sum)
@@ -116,7 +118,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 		}
 
 		// Create the pod in the cluster
-		pod, err = c.CreatePod(pod)
+		pod, err = c.CreatePod(ctx, pod)
 		if err != nil {
 			// Just debug log the output because failures just result in trying the next image
 			message.Debug(pod, err)
@@ -124,7 +126,7 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 		}
 
 		// if no error, try and wait for a seed image to be present, return if successful
-		if c.injectorIsReady(seedImages, spinner) {
+		if c.injectorIsReady(ctx, seedImages, spinner) {
 			spinner.Success()
 			return
 		}
@@ -137,20 +139,20 @@ func (c *Cluster) StartInjectionMadness(tmpDir string, imagesDir string, injecto
 }
 
 // StopInjectionMadness handles cleanup once the seed registry is up.
-func (c *Cluster) StopInjectionMadness() error {
+func (c *Cluster) StopInjectionMadness(ctx context.Context) error {
 	// Try to kill the injector pod now
-	if err := c.DeletePod(ZarfNamespaceName, "injector"); err != nil {
+	if err := c.DeletePod(ctx, ZarfNamespaceName, "injector"); err != nil {
 		return err
 	}
 
 	// Remove the configmaps
 	labelMatch := map[string]string{"zarf-injector": "payload"}
-	if err := c.DeleteConfigMapsByLabel(ZarfNamespaceName, labelMatch); err != nil {
+	if err := c.DeleteConfigMapsByLabel(ctx, ZarfNamespaceName, labelMatch); err != nil {
 		return err
 	}
 
 	// Remove the injector service
-	return c.DeleteService(ZarfNamespaceName, "zarf-injector")
+	return c.DeleteService(ctx, ZarfNamespaceName, "zarf-injector")
 }
 
 func (c *Cluster) loadSeedImages(imagesDir, seedImagesDir string, injectorSeedSrcs []string, spinner *message.Spinner) ([]transform.Image, error) {
@@ -162,34 +164,36 @@ func (c *Cluster) loadSeedImages(imagesDir, seedImagesDir string, injectorSeedSr
 		spinner.Updatef("Loading the seed image '%s' from the package", src)
 		ref, err := transform.ParseImageRef(src)
 		if err != nil {
-			return seedImages, fmt.Errorf("failed to create ref for image %s: %w", src, err)
+			return nil, fmt.Errorf("failed to create ref for image %s: %w", src, err)
 		}
 		img, err := utils.LoadOCIImage(imagesDir, ref)
 		if err != nil {
-			return seedImages, err
+			return nil, err
 		}
 
-		crane.SaveOCI(img, seedImagesDir)
+		if err := crane.SaveOCI(img, seedImagesDir); err != nil {
+			return nil, err
+		}
 
 		seedImages = append(seedImages, ref)
 
 		// Get the image digest so we can set an annotation in the image.json later
 		imgDigest, err := img.Digest()
 		if err != nil {
-			return seedImages, err
+			return nil, err
 		}
 		// This is done _without_ the domain (different from pull.go) since the injector only handles local images
 		localReferenceToDigest[ref.Path+ref.TagOrDigest] = imgDigest.String()
 	}
 
 	if err := utils.AddImageNameAnnotation(seedImagesDir, localReferenceToDigest); err != nil {
-		return seedImages, fmt.Errorf("unable to format OCI layout: %w", err)
+		return nil, fmt.Errorf("unable to format OCI layout: %w", err)
 	}
 
 	return seedImages, nil
 }
 
-func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner *message.Spinner) ([]string, string, error) {
+func (c *Cluster) createPayloadConfigmaps(ctx context.Context, seedImagesDir, tarPath string, spinner *message.Spinner) ([]string, string, error) {
 	var configMaps []string
 
 	// Chunk size has to accommodate base64 encoding & etcd 1MB limit
@@ -226,7 +230,7 @@ func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner
 		spinner.Updatef("Adding archive binary configmap %d of %d to the cluster", idx+1, chunkCount)
 
 		// Attempt to create the configmap in the cluster
-		if _, err = c.ReplaceConfigmap(ZarfNamespaceName, fileName, configData); err != nil {
+		if _, err = c.ReplaceConfigmap(ctx, ZarfNamespaceName, fileName, configData); err != nil {
 			return configMaps, "", err
 		}
 
@@ -241,13 +245,13 @@ func (c *Cluster) createPayloadConfigmaps(seedImagesDir, tarPath string, spinner
 }
 
 // Test for pod readiness and seed image presence.
-func (c *Cluster) injectorIsReady(seedImages []transform.Image, spinner *message.Spinner) bool {
+func (c *Cluster) injectorIsReady(ctx context.Context, seedImages []transform.Image, spinner *message.Spinner) bool {
 	tunnel, err := c.NewTunnel(ZarfNamespaceName, k8s.SvcResource, ZarfInjectorName, "", 0, ZarfInjectorPort)
 	if err != nil {
 		return false
 	}
 
-	_, err = tunnel.Connect()
+	_, err = tunnel.Connect(ctx)
 	if err != nil {
 		return false
 	}
@@ -276,7 +280,7 @@ func (c *Cluster) injectorIsReady(seedImages []transform.Image, spinner *message
 	return true
 }
 
-func (c *Cluster) createInjectorConfigmap(binaryPath string) error {
+func (c *Cluster) createInjectorConfigmap(ctx context.Context, binaryPath string) error {
 	var err error
 	configData := make(map[string][]byte)
 
@@ -286,17 +290,17 @@ func (c *Cluster) createInjectorConfigmap(binaryPath string) error {
 	}
 
 	// Try to delete configmap silently
-	_ = c.DeleteConfigmap(ZarfNamespaceName, "rust-binary")
+	_ = c.DeleteConfigmap(ctx, ZarfNamespaceName, "rust-binary")
 
 	// Attempt to create the configmap in the cluster
-	if _, err = c.CreateConfigmap(ZarfNamespaceName, "rust-binary", configData); err != nil {
+	if _, err = c.CreateConfigmap(ctx, ZarfNamespaceName, "rust-binary", configData); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (c *Cluster) createService() (*corev1.Service, error) {
+func (c *Cluster) createService(ctx context.Context) (*corev1.Service, error) {
 	service := c.GenerateService(ZarfNamespaceName, "zarf-injector")
 
 	service.Spec.Type = corev1.ServiceTypeNodePort
@@ -308,9 +312,9 @@ func (c *Cluster) createService() (*corev1.Service, error) {
 	}
 
 	// Attempt to purge the service silently
-	_ = c.DeleteService(ZarfNamespaceName, "zarf-injector")
+	_ = c.DeleteService(ctx, ZarfNamespaceName, "zarf-injector")
 
-	return c.CreateService(service)
+	return c.CreateService(ctx, service)
 }
 
 // buildInjectionPod returns a pod for injection with the appropriate containers to perform the injection.
@@ -432,66 +436,61 @@ func (c *Cluster) buildInjectionPod(node, image string, payloadConfigmaps []stri
 	return pod, nil
 }
 
-// GetImagesFromAvailableNodes checks for images on schedulable nodes within a cluster and returns
+// getImagesAndNodesForInjection checks for images on schedulable nodes within a cluster.
+func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeMap, error) { result := make(imageNodeMap) + timer := time.NewTimer(0) + defer timer.Stop() + for { select { - - // On timeout abort - case <-timeout: - return nil, fmt.Errorf("get image list timed-out") - - // After delay, try running - default: - pods, err := c.GetPods(corev1.NamespaceAll, metav1.ListOptions{ + case <-ctx.Done(): + return nil, fmt.Errorf("get image list timed-out: %w", ctx.Err()) + case <-timer.C: + pods, err := c.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{ FieldSelector: fmt.Sprintf("status.phase=%s", corev1.PodRunning), }) if err != nil { return nil, fmt.Errorf("unable to get the list of %q pods in the cluster: %w", corev1.PodRunning, err) } findImages: for _, pod := range pods.Items { nodeName := pod.Spec.NodeName - nodeDetails, err := c.GetNode(nodeName) + nodeDetails, err := c.GetNode(ctx, nodeName) if err != nil { return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err) } if nodeDetails.Status.Allocatable.Cpu().Cmp(injectorRequestedCPU) < 0 || nodeDetails.Status.Allocatable.Memory().Cmp(injectorRequestedMemory) < 0 { - continue findImages + continue } for _, taint := range nodeDetails.Spec.Taints { if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { continue findImages } } for _, container := range pod.Spec.InitContainers { result[container.Image] = append(result[container.Image], nodeName) } - for _, container := range pod.Spec.Containers { result[container.Image] = append(result[container.Image], nodeName) } - for _, container := range pod.Spec.EphemeralContainers { result[container.Image] = append(result[container.Image], nodeName) } } - } - if len(result) < 1 { - c.Log("no images found: %w") - time.Sleep(2 * time.Second) - } else { - return result, nil + if len(result) > 0 { + return result, nil + } + + c.Log("No images found on any node. Retrying...") + timer.Reset(2 * time.Second) } } } diff --git a/src/pkg/cluster/namespace.go b/src/pkg/cluster/namespace.go index 82b4277901..a7209936b3 100644 --- a/src/pkg/cluster/namespace.go +++ b/src/pkg/cluster/namespace.go @@ -11,9 +11,9 @@ import ( ) // DeleteZarfNamespace deletes the Zarf namespace from the connected cluster.
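The shape above (a zero-duration timer raced against the context in a select) is the retry pattern this change rolls out everywhere in place of time.After/time.Sleep loops: the first attempt fires immediately, each retry re-arms the timer, and cancellation is observed between attempts. Isolated as a sketch, with probe standing in for whatever is being checked (not a real Zarf helper):

func pollUntil(ctx context.Context, interval time.Duration, probe func(context.Context) (bool, error)) error {
	timer := time.NewTimer(0) // fires immediately, so the first attempt is not delayed
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			// The caller's deadline or cancellation bounds the whole loop.
			return ctx.Err()
		case <-timer.C:
			done, err := probe(ctx)
			if err != nil {
				return err
			}
			if done {
				return nil
			}
			timer.Reset(interval) // re-arm for the next attempt
		}
	}
}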
-func (c *Cluster) DeleteZarfNamespace() { +func (c *Cluster) DeleteZarfNamespace(ctx context.Context) error { spinner := message.NewProgressSpinner("Deleting the zarf namespace from this cluster") defer spinner.Stop() - c.DeleteNamespace(context.TODO(), ZarfNamespaceName) + return c.DeleteNamespace(ctx, ZarfNamespaceName) } diff --git a/src/pkg/cluster/secrets.go b/src/pkg/cluster/secrets.go index 2cdb1a20d1..17183d5e62 100644 --- a/src/pkg/cluster/secrets.go +++ b/src/pkg/cluster/secrets.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "encoding/base64" "encoding/json" "reflect" @@ -73,16 +74,16 @@ func (c *Cluster) GenerateGitPullCreds(namespace, name string, gitServerInfo typ } // UpdateZarfManagedImageSecrets updates all Zarf-managed image secrets in all namespaces based on state -func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { +func (c *Cluster) UpdateZarfManagedImageSecrets(ctx context.Context, state *types.ZarfState) { spinner := message.NewProgressSpinner("Updating existing Zarf-managed image secrets") defer spinner.Stop() - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { // Update all image pull secrets for _, namespace := range namespaces.Items { - currentRegistrySecret, err := c.GetSecret(namespace.Name, config.ZarfImagePullSecretName) + currentRegistrySecret, err := c.GetSecret(ctx, namespace.Name, config.ZarfImagePullSecretName) if err != nil { continue } @@ -96,7 +97,7 @@ func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { newRegistrySecret := c.GenerateRegistryPullCreds(namespace.Name, config.ZarfImagePullSecretName, state.RegistryInfo) if !reflect.DeepEqual(currentRegistrySecret.Data, newRegistrySecret.Data) { // Create or update the zarf registry secret - if _, err := c.CreateOrUpdateSecret(newRegistrySecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, newRegistrySecret); err != nil { message.WarnErrf(err, "Problem creating registry secret for the %s namespace", namespace.Name) } } @@ -107,16 +108,16 @@ func (c *Cluster) UpdateZarfManagedImageSecrets(state *types.ZarfState) { } // UpdateZarfManagedGitSecrets updates all Zarf-managed git secrets in all namespaces based on state -func (c *Cluster) UpdateZarfManagedGitSecrets(state *types.ZarfState) { +func (c *Cluster) UpdateZarfManagedGitSecrets(ctx context.Context, state *types.ZarfState) { spinner := message.NewProgressSpinner("Updating existing Zarf-managed git secrets") defer spinner.Stop() - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { // Update all git pull secrets for _, namespace := range namespaces.Items { - currentGitSecret, err := c.GetSecret(namespace.Name, config.ZarfGitServerSecretName) + currentGitSecret, err := c.GetSecret(ctx, namespace.Name, config.ZarfGitServerSecretName) if err != nil { continue } @@ -130,7 +131,7 @@ func (c *Cluster) UpdateZarfManagedGitSecrets(state *types.ZarfState) { newGitSecret := c.GenerateGitPullCreds(namespace.Name, config.ZarfGitServerSecretName, state.GitServer) if !reflect.DeepEqual(currentGitSecret.StringData, newGitSecret.StringData) { // Create or update the zarf git secret - if _, err := c.CreateOrUpdateSecret(newGitSecret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, newGitSecret); err != nil { message.WarnErrf(err, "Problem creating git 
server secret for the %s namespace", namespace.Name) } } diff --git a/src/pkg/cluster/state.go b/src/pkg/cluster/state.go index ebac18fbd3..a2585c4361 100644 --- a/src/pkg/cluster/state.go +++ b/src/pkg/cluster/state.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "encoding/json" "fmt" "time" @@ -34,7 +35,7 @@ const ( ) // InitZarfState initializes the Zarf state with the given temporary directory and init configs. -func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { +func (c *Cluster) InitZarfState(ctx context.Context, initOptions types.ZarfInitOptions) error { var ( distro string err error @@ -46,7 +47,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Attempt to load an existing state prior to init. // NOTE: We are ignoring the error here because we don't really expect a state to exist yet. spinner.Updatef("Checking cluster for existing Zarf deployment") - state, _ := c.LoadZarfState() + state, _ := c.LoadZarfState(ctx) // If state is nil, this is a new cluster. if state == nil { @@ -59,7 +60,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { state.ZarfAppliance = true } else { // Otherwise, try to detect the K8s distro type. - distro, err = c.DetectDistro() + distro, err = c.DetectDistro(ctx) if err != nil { // This is a basic failure message for now, but it could be polished to guide the user toward a resolution. return fmt.Errorf("unable to connect to the cluster to verify the distro: %w", err) @@ -79,7 +80,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Set up the Zarf agent PKI state.AgentTLS = pki.GeneratePKI(config.ZarfAgentHost) - namespaces, err := c.GetNamespaces() + namespaces, err := c.GetNamespaces(ctx) if err != nil { return fmt.Errorf("unable to get the Kubernetes namespaces: %w", err) } @@ -93,7 +94,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // This label will tell the Zarf Agent to ignore this namespace. namespace.Labels[agentLabel] = "ignore" namespaceCopy := namespace - if _, err = c.UpdateNamespace(&namespaceCopy); err != nil { + if _, err = c.UpdateNamespace(ctx, &namespaceCopy); err != nil { // This is not a hard failure, but we should log it. message.WarnErrf(err, "Unable to mark the namespace %s as ignored by Zarf Agent", namespace.Name) } @@ -102,14 +103,16 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { // Try to create the zarf namespace. spinner.Updatef("Creating the Zarf namespace") zarfNamespace := c.NewZarfManagedNamespace(ZarfNamespaceName) - if _, err := c.CreateNamespace(zarfNamespace); err != nil { + if _, err := c.CreateNamespace(ctx, zarfNamespace); err != nil { return fmt.Errorf("unable to create the zarf namespace: %w", err) } // Wait up to 2 minutes for the default service account to be created. // Some clusters seem to take a while to create this, see https://github.com/kubernetes/kubernetes/issues/66689. // The default SA is required for pods to start properly.
- if _, err := c.WaitForServiceAccount(ZarfNamespaceName, "default", 2*time.Minute); err != nil { + saCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) + defer cancel() + if _, err := c.WaitForServiceAccount(saCtx, ZarfNamespaceName, "default"); err != nil { return fmt.Errorf("unable to get default Zarf service account: %w", err) } @@ -158,7 +161,7 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { spinner.Success() // Save the state back to K8s - if err := c.SaveZarfState(state); err != nil { + if err := c.SaveZarfState(ctx, state); err != nil { return fmt.Errorf("unable to save the Zarf state: %w", err) } @@ -166,9 +169,9 @@ func (c *Cluster) InitZarfState(initOptions types.ZarfInitOptions) error { } // LoadZarfState returns the current zarf/zarf-state secret data or an empty ZarfState. -func (c *Cluster) LoadZarfState() (state *types.ZarfState, err error) { +func (c *Cluster) LoadZarfState(ctx context.Context) (state *types.ZarfState, err error) { // Set up the API connection - secret, err := c.GetSecret(ZarfNamespaceName, ZarfStateSecretName) + secret, err := c.GetSecret(ctx, ZarfNamespaceName, ZarfStateSecretName) if err != nil { return nil, fmt.Errorf("%w. %s", err, message.ColorWrap("Did you remember to zarf init?", color.Bold)) } @@ -218,7 +221,7 @@ func (c *Cluster) debugPrintZarfState(state *types.ZarfState) { } // SaveZarfState takes a given state and persists it to the Zarf/zarf-state secret. -func (c *Cluster) SaveZarfState(state *types.ZarfState) error { +func (c *Cluster) SaveZarfState(ctx context.Context, state *types.ZarfState) error { c.debugPrintZarfState(state) // Convert the data back to JSON. @@ -249,7 +252,7 @@ func (c *Cluster) SaveZarfState(state *types.ZarfState) error { } // Attempt to create or update the secret and return. - if _, err := c.CreateOrUpdateSecret(secret); err != nil { + if _, err := c.CreateOrUpdateSecret(ctx, secret); err != nil { return fmt.Errorf("unable to create the zarf state secret") } diff --git a/src/pkg/cluster/tunnel.go b/src/pkg/cluster/tunnel.go index d52c606ecc..c960fb1e8e 100644 --- a/src/pkg/cluster/tunnel.go +++ b/src/pkg/cluster/tunnel.go @@ -5,6 +5,7 @@ package cluster import ( + "context" "fmt" "strings" @@ -54,8 +55,8 @@ func NewTunnelInfo(namespace, resourceType, resourceName, urlSuffix string, loca } // PrintConnectTable will print a table of all Zarf connect matches found in the cluster. -func (c *Cluster) PrintConnectTable() error { - list, err := c.GetServicesByLabelExists(v1.NamespaceAll, config.ZarfConnectLabelName) +func (c *Cluster) PrintConnectTable(ctx context.Context) error { + list, err := c.GetServicesByLabelExists(ctx, v1.NamespaceAll, config.ZarfConnectLabelName) if err != nil { return err } @@ -78,7 +79,7 @@ } // Connect will establish a tunnel to the specified target.
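The WaitForServiceAccount change above shows the signature convention this diff adopts: waits no longer accept a time.Duration and instead inherit their bound from the caller's context, so one deadline can cover a whole sequence of cluster operations. A hypothetical caller (the function name and the five-minute figure are illustrative):

func waitForZarfReady(k *k8s.K8s) error {
	// One deadline covers both waits instead of each owning a private timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	if err := k.WaitForHealthyCluster(ctx); err != nil {
		return err
	}
	_, err := k.WaitForServiceAccount(ctx, "zarf", "default")
	return err
}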
-func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { +func (c *Cluster) Connect(ctx context.Context, target string) (*k8s.Tunnel, error) { var err error zt := TunnelInfo{ namespace: ZarfNamespaceName, @@ -107,7 +108,7 @@ func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { default: if target != "" { - if zt, err = c.checkForZarfConnectLabel(target); err != nil { + if zt, err = c.checkForZarfConnectLabel(ctx, target); err != nil { return nil, fmt.Errorf("problem looking for a zarf connect label in the cluster: %s", err.Error()) } } @@ -120,17 +121,17 @@ func (c *Cluster) Connect(target string) (*k8s.Tunnel, error) { } } - return c.ConnectTunnelInfo(zt) + return c.ConnectTunnelInfo(ctx, zt) } // ConnectTunnelInfo connects to the cluster with the provided TunnelInfo -func (c *Cluster) ConnectTunnelInfo(zt TunnelInfo) (*k8s.Tunnel, error) { +func (c *Cluster) ConnectTunnelInfo(ctx context.Context, zt TunnelInfo) (*k8s.Tunnel, error) { tunnel, err := c.NewTunnel(zt.namespace, zt.resourceType, zt.resourceName, zt.urlSuffix, zt.localPort, zt.remotePort) if err != nil { return nil, err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return nil, err } @@ -139,7 +140,7 @@ func (c *Cluster) ConnectTunnelInfo(zt TunnelInfo) (*k8s.Tunnel, error) { } // ConnectToZarfRegistryEndpoint determines if a registry endpoint is in cluster, and if so opens a tunnel to connect to it -func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) (string, *k8s.Tunnel, error) { +func (c *Cluster) ConnectToZarfRegistryEndpoint(ctx context.Context, registryInfo types.RegistryInfo) (string, *k8s.Tunnel, error) { registryEndpoint := registryInfo.Address var err error @@ -150,7 +151,7 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) return "", tunnel, err } } else { - svcInfo, err := c.ServiceInfoFromNodePortURL(registryInfo.Address) + svcInfo, err := c.ServiceInfoFromNodePortURL(ctx, registryInfo.Address) // If this is a service (no error getting svcInfo), create a port-forward tunnel to that resource if err == nil { @@ -161,7 +162,7 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) } if tunnel != nil { - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return "", tunnel, err } @@ -172,13 +173,13 @@ func (c *Cluster) ConnectToZarfRegistryEndpoint(registryInfo types.RegistryInfo) } // checkForZarfConnectLabel looks in the cluster for a connect name that matches the target -func (c *Cluster) checkForZarfConnectLabel(name string) (TunnelInfo, error) { +func (c *Cluster) checkForZarfConnectLabel(ctx context.Context, name string) (TunnelInfo, error) { var err error var zt TunnelInfo message.Debugf("Looking for a Zarf Connect Label in the cluster") - matches, err := c.GetServicesByLabel("", config.ZarfConnectLabelName, name) + matches, err := c.GetServicesByLabel(ctx, "", config.ZarfConnectLabelName, name) if err != nil { return zt, fmt.Errorf("unable to lookup the service: %w", err) } @@ -195,7 +196,7 @@ func (c *Cluster) checkForZarfConnectLabel(name string) (TunnelInfo, error) { zt.remotePort = svc.Spec.Ports[0].TargetPort.IntValue() // if targetPort == 0, look for Port (which is required) if zt.remotePort == 0 { - zt.remotePort = c.FindPodContainerPort(svc) + zt.remotePort = c.FindPodContainerPort(ctx, svc) } // Add the url suffix too. 
diff --git a/src/pkg/cluster/zarf.go b/src/pkg/cluster/zarf.go index afef2362b3..3522dde6b5 100644 --- a/src/pkg/cluster/zarf.go +++ b/src/pkg/cluster/zarf.go @@ -7,7 +7,6 @@ package cluster import ( "context" "encoding/json" - "errors" "fmt" "strings" "time" @@ -23,11 +22,11 @@ import ( // GetDeployedZarfPackages gets metadata information about packages that have been deployed to the cluster. // We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace. // Returns a list of DeployedPackage structs and a list of errors. -func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, []error) { +func (c *Cluster) GetDeployedZarfPackages(ctx context.Context) ([]types.DeployedPackage, []error) { var deployedPackages = []types.DeployedPackage{} var errorList []error // Get the secrets that describe the deployed packages - secrets, err := c.GetSecretsWithLabel(ZarfNamespaceName, ZarfPackageInfoLabel) + secrets, err := c.GetSecretsWithLabel(ctx, ZarfNamespaceName, ZarfPackageInfoLabel) if err != nil { return deployedPackages, append(errorList, err) } @@ -52,9 +51,9 @@ func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, []error) { // GetDeployedPackage gets the metadata information about the package name provided (if it exists in the cluster). // We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace. -func (c *Cluster) GetDeployedPackage(packageName string) (deployedPackage *types.DeployedPackage, err error) { +func (c *Cluster) GetDeployedPackage(ctx context.Context, packageName string) (deployedPackage *types.DeployedPackage, err error) { // Get the secret that describes the deployed package - secret, err := c.GetSecret(ZarfNamespaceName, config.ZarfPackagePrefix+packageName) + secret, err := c.GetSecret(ctx, ZarfNamespaceName, config.ZarfPackagePrefix+packageName) if err != nil { return deployedPackage, err } @@ -63,7 +62,7 @@ } // StripZarfLabelsAndSecretsFromNamespaces removes metadata and secrets from existing namespaces no longer managed by Zarf. -func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() { +func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces(ctx context.Context) { spinner := message.NewProgressSpinner("Removing zarf metadata & secrets from existing namespaces not managed by Zarf") defer spinner.Stop() @@ -72,7 +71,7 @@ LabelSelector: config.ZarfManagedByLabel + "=zarf", } - if namespaces, err := c.GetNamespaces(); err != nil { + if namespaces, err := c.GetNamespaces(ctx); err != nil { spinner.Errorf(err, "Unable to get k8s namespaces") } else { for _, namespace := range namespaces.Items { @@ -80,7 +79,7 @@ spinner.Updatef("Removing Zarf Agent label for namespace %s", namespace.Name) delete(namespace.Labels, agentLabel) namespaceCopy := namespace - if _, err = c.UpdateNamespace(&namespaceCopy); err != nil { + if _, err = c.UpdateNamespace(ctx, &namespaceCopy); err != nil { // This is not a hard failure, but we should log it spinner.Errorf(err, "Unable to update the namespace labels for %s", namespace.Name) } @@ -89,7 +88,7 @@ spinner.Updatef("Removing Zarf secrets for namespace %s", namespace.Name) err := c.Clientset.CoreV1(). Secrets(namespace.Name).
- DeleteCollection(context.TODO(), deleteOptions, listOptions) + DeleteCollection(ctx, deleteOptions, listOptions) if err != nil { spinner.Errorf(err, "Unable to delete secrets from namespace %s", namespace.Name) } @@ -125,9 +124,8 @@ func (c *Cluster) PackageSecretNeedsWait(deployedPackage *types.DeployedPackage, } // RecordPackageDeploymentAndWait records the deployment of a package to the cluster and waits for any webhooks to complete. -func (c *Cluster) RecordPackageDeploymentAndWait(pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int, component types.ZarfComponent, skipWebhooks bool) (deployedPackage *types.DeployedPackage, err error) { - - deployedPackage, err = c.RecordPackageDeployment(pkg, components, connectStrings, generation) +func (c *Cluster) RecordPackageDeploymentAndWait(ctx context.Context, pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int, component types.ZarfComponent, skipWebhooks bool) (deployedPackage *types.DeployedPackage, err error) { + deployedPackage, err = c.RecordPackageDeployment(ctx, pkg, components, connectStrings, generation) if err != nil { return nil, err } @@ -135,41 +133,46 @@ func (c *Cluster) RecordPackageDeploymentAndWait(pkg types.ZarfPackage, componen packageNeedsWait, waitSeconds, hookName := c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) // If no webhooks need to complete, we can return immediately. if !packageNeedsWait { - return nil, nil + return deployedPackage, nil } - // Timebox the amount of time we wait for a webhook to complete before erroring waitDuration := types.DefaultWebhookWaitDuration if waitSeconds > 0 { waitDuration = time.Duration(waitSeconds) * time.Second } - timeout := time.After(waitDuration) - // We need to wait for this package to finish having webhooks run, create a spinner and keep checking until it's ready - spinner := message.NewProgressSpinner("Waiting for webhook '%s' to complete for component '%s'", hookName, component.Name) + waitCtx, cancel := context.WithTimeout(ctx, waitDuration) + defer cancel() + + spinner := message.NewProgressSpinner("Waiting for webhook %q to complete for component %q", hookName, component.Name) defer spinner.Stop() - for packageNeedsWait { + + timer := time.NewTimer(0) + defer timer.Stop() + + for { select { - // On timeout, abort and return an error. - case <-timeout: - return nil, errors.New("timed out waiting for package deployment to complete") - default: - // Wait for 1 second before checking the secret again - time.Sleep(1 * time.Second) - deployedPackage, err = c.GetDeployedPackage(deployedPackage.Name) + case <-waitCtx.Done(): + return nil, fmt.Errorf("error waiting for webhook %q to complete for component %q: %w", hookName, component.Name, waitCtx.Err()) + case <-timer.C: + deployedPackage, err = c.GetDeployedPackage(ctx, deployedPackage.Name) if err != nil { return nil, err } + packageNeedsWait, _, _ = c.PackageSecretNeedsWait(deployedPackage, component, skipWebhooks) + if !packageNeedsWait { + spinner.Success() + return deployedPackage, nil + } + + timer.Reset(1 * time.Second) } } - - spinner.Success() - return deployedPackage, nil } // RecordPackageDeployment saves metadata about a package that has been deployed to the cluster. 
-func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int) (deployedPackage *types.DeployedPackage, err error) { +func (c *Cluster) RecordPackageDeployment(ctx context.Context, pkg types.ZarfPackage, components []types.DeployedComponent, connectStrings types.ConnectStrings, generation int) (deployedPackage *types.DeployedPackage, err error) { packageName := pkg.Metadata.Name // Generate a secret that describes the package that is being deployed @@ -179,7 +182,7 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty // Attempt to load information about webhooks for the package var componentWebhooks map[string]map[string]types.Webhook - existingPackageSecret, err := c.GetDeployedPackage(packageName) + existingPackageSecret, err := c.GetDeployedPackage(ctx, packageName) if err != nil { message.Debugf("Unable to fetch existing secret for package '%s': %s", packageName, err.Error()) } @@ -205,7 +208,7 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty // Update the package secret deployedPackageSecret.Data = map[string][]byte{"data": packageData} var updatedSecret *corev1.Secret - if updatedSecret, err = c.CreateOrUpdateSecret(deployedPackageSecret); err != nil { + if updatedSecret, err = c.CreateOrUpdateSecret(ctx, deployedPackageSecret); err != nil { return nil, fmt.Errorf("failed to record package deployment in secret '%s'", deployedPackageSecret.Name) } @@ -217,8 +220,8 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty } // EnableRegHPAScaleDown enables the HPA scale down for the Zarf Registry. -func (c *Cluster) EnableRegHPAScaleDown() error { - hpa, err := c.GetHPA(ZarfNamespaceName, "zarf-docker-registry") +func (c *Cluster) EnableRegHPAScaleDown(ctx context.Context) error { + hpa, err := c.GetHPA(ctx, ZarfNamespaceName, "zarf-docker-registry") if err != nil { return err } @@ -228,7 +231,7 @@ func (c *Cluster) EnableRegHPAScaleDown() error { hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy // Save the HPA changes. - if _, err = c.UpdateHPA(hpa); err != nil { + if _, err = c.UpdateHPA(ctx, hpa); err != nil { return err } @@ -236,8 +239,8 @@ func (c *Cluster) EnableRegHPAScaleDown() error { } // DisableRegHPAScaleDown disables the HPA scale down for the Zarf Registry. -func (c *Cluster) DisableRegHPAScaleDown() error { - hpa, err := c.GetHPA(ZarfNamespaceName, "zarf-docker-registry") +func (c *Cluster) DisableRegHPAScaleDown(ctx context.Context) error { + hpa, err := c.GetHPA(ctx, ZarfNamespaceName, "zarf-docker-registry") if err != nil { return err } @@ -247,7 +250,7 @@ func (c *Cluster) DisableRegHPAScaleDown() error { hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy // Save the HPA changes. - if _, err = c.UpdateHPA(hpa); err != nil { + if _, err = c.UpdateHPA(ctx, hpa); err != nil { return err } @@ -255,8 +258,8 @@ func (c *Cluster) DisableRegHPAScaleDown() error { } // GetInstalledChartsForComponent returns any installed Helm Charts for the provided package component. 
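EnableRegHPAScaleDown and DisableRegHPAScaleDown above differ only in the policy value they assign, which the hunk context elides. A sketch of that toggle using the autoscaling/v2 constants the SelectPolicy field accepts; like the surrounding code, it assumes Spec.Behavior.ScaleDown is already populated:

import autoscalingV2 "k8s.io/api/autoscaling/v2"

// setRegistryScaleDown flips an HPA between normal scale-down ("Min")
// and scale-down disabled ("Disabled").
func setRegistryScaleDown(hpa *autoscalingV2.HorizontalPodAutoscaler, allow bool) {
	policy := autoscalingV2.DisabledPolicySelect
	if allow {
		policy = autoscalingV2.MinChangePolicySelect
	}
	hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy
}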
-func (c *Cluster) GetInstalledChartsForComponent(packageName string, component types.ZarfComponent) (installedCharts []types.InstalledChart, err error) { - deployedPackage, err := c.GetDeployedPackage(packageName) +func (c *Cluster) GetInstalledChartsForComponent(ctx context.Context, packageName string, component types.ZarfComponent) (installedCharts []types.InstalledChart, err error) { + deployedPackage, err := c.GetDeployedPackage(ctx, packageName) if err != nil { return installedCharts, err } diff --git a/src/pkg/k8s/common.go b/src/pkg/k8s/common.go index e3472d8962..44027f4492 100644 --- a/src/pkg/k8s/common.go +++ b/src/pkg/k8s/common.go @@ -5,6 +5,7 @@ package k8s import ( + "context" "fmt" "time" @@ -42,35 +43,29 @@ func New(logger Log, defaultLabels Labels) (*K8s, error) { }, nil } -// NewWithWait is a convenience function that creates a new K8s client and waits for the cluster to be healthy. -func NewWithWait(logger Log, defaultLabels Labels, timeout time.Duration) (*K8s, error) { - k, err := New(logger, defaultLabels) - if err != nil { - return nil, err - } +// WaitForHealthyCluster checks for an available K8s cluster every second until the context is cancelled or times out. +func (k *K8s) WaitForHealthyCluster(ctx context.Context) error { + var ( + err error + nodes *v1.NodeList + pods *v1.PodList + ) - return k, k.WaitForHealthyCluster(timeout) -} + const waitDuration = 1 * time.Second -// WaitForHealthyCluster checks for an available K8s cluster every second until timeout. -func (k *K8s) WaitForHealthyCluster(timeout time.Duration) error { - var err error - var nodes *v1.NodeList - var pods *v1.PodList - expired := time.After(timeout) + timer := time.NewTimer(0) + defer timer.Stop() for { select { - // on timeout abort - case <-expired: - return fmt.Errorf("timed out waiting for cluster to report healthy") - - // after delay, try running - default: + case <-ctx.Done(): + return fmt.Errorf("error waiting for cluster to report healthy: %w", ctx.Err()) + case <-timer.C: if k.RestConfig == nil || k.Clientset == nil { config, clientset, err := connect() if err != nil { k.Log("Cluster connection not available yet: %w", err) + timer.Reset(waitDuration) continue } @@ -79,31 +74,30 @@ func (k *K8s) WaitForHealthyCluster(timeout time.Duration) error { } // Make sure there is at least one running Node - nodes, err = k.GetNodes() + nodes, err = k.GetNodes(ctx) if err != nil || len(nodes.Items) < 1 { - k.Log("No nodes reporting healthy yet: %#v\n", err) + k.Log("No nodes reporting healthy yet: %v\n", err) + timer.Reset(waitDuration) continue } // Get the cluster pod list - if pods, err = k.GetAllPods(); err != nil { + if pods, err = k.GetAllPods(ctx); err != nil { k.Log("Could not get the pod list: %w", err) + timer.Reset(waitDuration) continue } // Check that at least one pod is in the 'succeeded' or 'running' state for _, pod := range pods.Items { - // If a valid pod is found, return no error if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning { return nil } } k.Log("No pods reported 'succeeded' or 'running' state yet.") + timer.Reset(waitDuration) } - - // delay check 1 seconds - time.Sleep(1 * time.Second) } } diff --git a/src/pkg/k8s/configmap.go b/src/pkg/k8s/configmap.go index 93777f6b3f..57a72c65ae 100644 --- a/src/pkg/k8s/configmap.go +++ b/src/pkg/k8s/configmap.go @@ -14,16 +14,15 @@ import ( ) // ReplaceConfigmap deletes and recreates a configmap.
-func (k *K8s) ReplaceConfigmap(namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { - if err := k.DeleteConfigmap(namespace, name); err != nil { +func (k *K8s) ReplaceConfigmap(ctx context.Context, namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { + if err := k.DeleteConfigmap(ctx, namespace, name); err != nil { return nil, err } - - return k.CreateConfigmap(namespace, name, data) + return k.CreateConfigmap(ctx, namespace, name, data) } // CreateConfigmap applies a configmap to the cluster. -func (k *K8s) CreateConfigmap(namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { +func (k *K8s) CreateConfigmap(ctx context.Context, namespace, name string, data map[string][]byte) (*corev1.ConfigMap, error) { configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -34,14 +33,14 @@ func (k *K8s) CreateConfigmap(namespace, name string, data map[string][]byte) (* } createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, createOptions) + return k.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, configMap, createOptions) } // DeleteConfigmap deletes a configmap by name. -func (k *K8s) DeleteConfigmap(namespace, name string) error { +func (k *K8s) DeleteConfigmap(ctx context.Context, namespace, name string) error { namespaceConfigmap := k.Clientset.CoreV1().ConfigMaps(namespace) - err := namespaceConfigmap.Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := namespaceConfigmap.Delete(ctx, name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("error deleting the configmap: %w", err) } @@ -50,7 +49,7 @@ func (k *K8s) DeleteConfigmap(namespace, name string) error { } // DeleteConfigMapsByLabel deletes a configmap by label(s). 
-func (k *K8s) DeleteConfigMapsByLabel(namespace string, labels Labels) error { +func (k *K8s) DeleteConfigMapsByLabel(ctx context.Context, namespace string, labels Labels) error { labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchLabels: labels, }) @@ -59,5 +58,5 @@ func (k *K8s) DeleteConfigMapsByLabel(namespace string, labels Labels) error { LabelSelector: labelSelector.String(), } - return k.Clientset.CoreV1().ConfigMaps(namespace).DeleteCollection(context.TODO(), metaOptions, listOptions) + return k.Clientset.CoreV1().ConfigMaps(namespace).DeleteCollection(ctx, metaOptions, listOptions) } diff --git a/src/pkg/k8s/dynamic.go b/src/pkg/k8s/dynamic.go index daf87c7a1a..59f295f26b 100644 --- a/src/pkg/k8s/dynamic.go +++ b/src/pkg/k8s/dynamic.go @@ -15,17 +15,17 @@ import ( ) // AddLabelsAndAnnotations adds the provided labels and annotations to the specified K8s resource -func (k *K8s) AddLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string) error { - return k.updateLabelsAndAnnotations(resourceNamespace, resourceName, groupKind, labels, annotations, false) +func (k *K8s) AddLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string) error { + return k.updateLabelsAndAnnotations(ctx, resourceNamespace, resourceName, groupKind, labels, annotations, false) } // RemoveLabelsAndAnnotations removes the provided labels and annotations from the specified K8s resource -func (k *K8s) RemoveLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string) error { - return k.updateLabelsAndAnnotations(resourceNamespace, resourceName, groupKind, labels, annotations, true) +func (k *K8s) RemoveLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string) error { + return k.updateLabelsAndAnnotations(ctx, resourceNamespace, resourceName, groupKind, labels, annotations, true) } // updateLabelsAndAnnotations updates the provided labels and annotations to the specified K8s resource -func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName string, groupKind schema.GroupKind, labels map[string]string, annotations map[string]string, isRemove bool) error { +func (k *K8s) updateLabelsAndAnnotations(ctx context.Context, resourceNamespace, resourceName string, groupKind schema.GroupKind, labels, annotations map[string]string, isRemove bool) error { dynamicClient := dynamic.NewForConfigOrDie(k.RestConfig) discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(k.RestConfig) @@ -41,7 +41,7 @@ func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName return err } - deployedResource, err := dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Get(context.TODO(), resourceName, metav1.GetOptions{}) + deployedResource, err := dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Get(ctx, resourceName, metav1.GetOptions{}) if err != nil { return err } @@ -78,6 +78,6 @@ func (k *K8s) updateLabelsAndAnnotations(resourceNamespace string, resourceName deployedResource.SetAnnotations(deployedAnnotations) - _, err = dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Update(context.TODO(), deployedResource, metav1.UpdateOptions{}) + _, err =
dynamicClient.Resource(mapping.Resource).Namespace(resourceNamespace).Update(ctx, deployedResource, metav1.UpdateOptions{}) return err } diff --git a/src/pkg/k8s/hpa.go b/src/pkg/k8s/hpa.go index a159823f67..902f5b1d29 100644 --- a/src/pkg/k8s/hpa.go +++ b/src/pkg/k8s/hpa.go @@ -13,24 +13,24 @@ import ( ) // GetAllHPAs returns a list of horizontal pod autoscalers for all namespaces. -func (k *K8s) GetAllHPAs() (*autoscalingV2.HorizontalPodAutoscalerList, error) { - return k.GetHPAs(corev1.NamespaceAll) +func (k *K8s) GetAllHPAs(ctx context.Context) (*autoscalingV2.HorizontalPodAutoscalerList, error) { + return k.GetHPAs(ctx, corev1.NamespaceAll) } // GetHPAs returns a list of horizontal pod autoscalers in a given namespace. -func (k *K8s) GetHPAs(namespace string) (*autoscalingV2.HorizontalPodAutoscalerList, error) { +func (k *K8s) GetHPAs(ctx context.Context, namespace string) (*autoscalingV2.HorizontalPodAutoscalerList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(ctx, metaOptions) } // GetHPA returns a single horizontal pod autoscaler by namespace and name. -func (k *K8s) GetHPA(namespace, name string) (*autoscalingV2.HorizontalPodAutoscaler, error) { +func (k *K8s) GetHPA(ctx context.Context, namespace, name string) (*autoscalingV2.HorizontalPodAutoscaler, error) { metaOptions := metav1.GetOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(ctx, name, metaOptions) } // UpdateHPA updates the given horizontal pod autoscaler in the cluster. -func (k *K8s) UpdateHPA(hpa *autoscalingV2.HorizontalPodAutoscaler) (*autoscalingV2.HorizontalPodAutoscaler, error) { +func (k *K8s) UpdateHPA(ctx context.Context, hpa *autoscalingV2.HorizontalPodAutoscaler) (*autoscalingV2.HorizontalPodAutoscaler, error) { metaOptions := metav1.UpdateOptions{} - return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(hpa.Namespace).Update(context.TODO(), hpa, metaOptions) + return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(hpa.Namespace).Update(ctx, hpa, metaOptions) } diff --git a/src/pkg/k8s/info.go b/src/pkg/k8s/info.go index 61effabdaa..edb655b964 100644 --- a/src/pkg/k8s/info.go +++ b/src/pkg/k8s/info.go @@ -5,6 +5,7 @@ package k8s import ( + "context" "errors" "fmt" "regexp" @@ -29,7 +30,7 @@ const ( ) // DetectDistro returns the matching distro or unknown if not found. 
-func (k *K8s) DetectDistro() (string, error) { +func (k *K8s) DetectDistro(ctx context.Context) (string, error) { kindNodeRegex := regexp.MustCompile(`^kind://`) k3dNodeRegex := regexp.MustCompile(`^k3s://k3d-`) eksNodeRegex := regexp.MustCompile(`^aws:///`) @@ -38,7 +39,7 @@ func (k *K8s) DetectDistro() (string, error) { rke2Regex := regexp.MustCompile(`^rancher/rancher-agent:v2`) tkgRegex := regexp.MustCompile(`^projects\.registry\.vmware\.com/tkg/tanzu_core/`) - nodes, err := k.GetNodes() + nodes, err := k.GetNodes(ctx) if err != nil { return DistroIsUnknown, errors.New("error getting cluster nodes") } @@ -99,7 +100,7 @@ func (k *K8s) DetectDistro() (string, error) { } } - namespaces, err := k.GetNamespaces() + namespaces, err := k.GetNamespaces(ctx) if err != nil { return DistroIsUnknown, errors.New("error getting namespace list") } @@ -115,8 +116,8 @@ func (k *K8s) DetectDistro() (string, error) { } // GetArchitectures returns the cluster system architectures if found. -func (k *K8s) GetArchitectures() ([]string, error) { - nodes, err := k.GetNodes() +func (k *K8s) GetArchitectures(ctx context.Context) ([]string, error) { + nodes, err := k.GetNodes(ctx) if err != nil { return nil, err } diff --git a/src/pkg/k8s/namespace.go b/src/pkg/k8s/namespace.go index 2862731e90..3a63b1ac52 100644 --- a/src/pkg/k8s/namespace.go +++ b/src/pkg/k8s/namespace.go @@ -15,26 +15,26 @@ import ( ) // GetNamespaces returns a list of namespaces in the cluster. -func (k *K8s) GetNamespaces() (*corev1.NamespaceList, error) { +func (k *K8s) GetNamespaces(ctx context.Context) (*corev1.NamespaceList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().Namespaces().List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().Namespaces().List(ctx, metaOptions) } // UpdateNamespace updates the given namespace in the cluster. -func (k *K8s) UpdateNamespace(namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (k *K8s) UpdateNamespace(ctx context.Context, namespace *corev1.Namespace) (*corev1.Namespace, error) { updateOptions := metav1.UpdateOptions{} - return k.Clientset.CoreV1().Namespaces().Update(context.TODO(), namespace, updateOptions) + return k.Clientset.CoreV1().Namespaces().Update(ctx, namespace, updateOptions) } // CreateNamespace creates the given namespace or returns it if it already exists in the cluster. 
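A hypothetical use of GetArchitectures above, e.g. checking a package's architecture against the cluster before deploying (the helper name and pkgArch parameter are illustrative, not part of this change):

func verifyClusterArch(ctx context.Context, k *k8s.K8s, pkgArch string) error {
	archs, err := k.GetArchitectures(ctx)
	if err != nil {
		return err
	}
	for _, arch := range archs {
		if arch == pkgArch {
			return nil
		}
	}
	return fmt.Errorf("cluster architectures %v do not include %q", archs, pkgArch)
}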
-func (k *K8s) CreateNamespace(namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (k *K8s) CreateNamespace(ctx context.Context, namespace *corev1.Namespace) (*corev1.Namespace, error) { metaOptions := metav1.GetOptions{} createOptions := metav1.CreateOptions{} - match, err := k.Clientset.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metaOptions) + match, err := k.Clientset.CoreV1().Namespaces().Get(ctx, namespace.Name, metaOptions) if err != nil || match.Name != namespace.Name { - return k.Clientset.CoreV1().Namespaces().Create(context.TODO(), namespace, createOptions) + return k.Clientset.CoreV1().Namespaces().Create(ctx, namespace, createOptions) } return match, err @@ -45,19 +45,25 @@ func (k *K8s) DeleteNamespace(ctx context.Context, name string) error { // Attempt to delete the namespace immediately gracePeriod := int64(0) err := k.Clientset.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}) - // If an error besides "not found" is returned, return it if err != nil && !errors.IsNotFound(err) { return err } - // Indefinitely wait for the namespace to be deleted, use context.WithTimeout to limit this + timer := time.NewTimer(0) + defer timer.Stop() + for { - // Keep checking for the namespace to be deleted - _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + _, err := k.Clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + timer.Reset(1 * time.Second) } - time.Sleep(1 * time.Second) } } diff --git a/src/pkg/k8s/nodes.go b/src/pkg/k8s/nodes.go index c2348e06f4..134c00b140 100644 --- a/src/pkg/k8s/nodes.go +++ b/src/pkg/k8s/nodes.go @@ -12,12 +12,12 @@ import ( ) // GetNodes returns a list of nodes from the k8s cluster. -func (k *K8s) GetNodes() (*corev1.NodeList, error) { +func (k *K8s) GetNodes(ctx context.Context) (*corev1.NodeList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().Nodes().List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().Nodes().List(ctx, metaOptions) } // GetNode returns a node from the k8s cluster. -func (k *K8s) GetNode(nodeName string) (*corev1.Node, error) { - return k.Clientset.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func (k *K8s) GetNode(ctx context.Context, nodeName string) (*corev1.Node, error) { + return k.Clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } diff --git a/src/pkg/k8s/pods.go b/src/pkg/k8s/pods.go index bf29291587..be9c72bec4 100644 --- a/src/pkg/k8s/pods.go +++ b/src/pkg/k8s/pods.go @@ -14,8 +14,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const waitLimit = 30 - // GeneratePod creates a new pod without adding it to the k8s cluster. func (k *K8s) GeneratePod(name, namespace string) *corev1.Pod { pod := &corev1.Pod{ @@ -34,33 +32,42 @@ func (k *K8s) GeneratePod(name, namespace string) *corev1.Pod { } // DeletePod removes a pod from the cluster by namespace & name. 
-func (k *K8s) DeletePod(namespace string, name string) error { +func (k *K8s) DeletePod(ctx context.Context, namespace string, name string) error { deleteGracePeriod := int64(0) deletePolicy := metav1.DeletePropagationForeground - err := k.Clientset.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{ + + err := k.Clientset.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{ GracePeriodSeconds: &deleteGracePeriod, PropagationPolicy: &deletePolicy, }) - if err != nil { return err } + timer := time.NewTimer(0) + defer timer.Stop() + for { - // Keep checking for the pod to be deleted - _, err := k.Clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + _, err := k.Clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + timer.Reset(1 * time.Second) } - time.Sleep(1 * time.Second) } } // DeletePods removes a collection of pods from the cluster by pod lookup. -func (k *K8s) DeletePods(target PodLookup) error { +func (k *K8s) DeletePods(ctx context.Context, target PodLookup) error { deleteGracePeriod := int64(0) deletePolicy := metav1.DeletePropagationForeground - return k.Clientset.CoreV1().Pods(target.Namespace).DeleteCollection(context.TODO(), + return k.Clientset.CoreV1().Pods(target.Namespace).DeleteCollection( + ctx, metav1.DeleteOptions{ GracePeriodSeconds: &deleteGracePeriod, PropagationPolicy: &deletePolicy, @@ -72,111 +79,115 @@ } // CreatePod inserts the given pod into the cluster. -func (k *K8s) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) { +func (k *K8s) CreatePod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) { createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, createOptions) + return k.Clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, createOptions) } // GetAllPods returns a list of pods from the cluster for all namespaces. -func (k *K8s) GetAllPods() (*corev1.PodList, error) { - return k.GetPods(corev1.NamespaceAll, metav1.ListOptions{}) +func (k *K8s) GetAllPods(ctx context.Context) (*corev1.PodList, error) { + return k.GetPods(ctx, corev1.NamespaceAll, metav1.ListOptions{}) } // GetPods returns a list of pods from the cluster by namespace. -func (k *K8s) GetPods(namespace string, listOpts metav1.ListOptions) (*corev1.PodList, error) { - return k.Clientset.CoreV1().Pods(namespace).List(context.TODO(), listOpts) +func (k *K8s) GetPods(ctx context.Context, namespace string, listOpts metav1.ListOptions) (*corev1.PodList, error) { + return k.Clientset.CoreV1().Pods(namespace).List(ctx, listOpts) } // WaitForPodsAndContainers attempts to find pods matching the given selector and optional inclusion filter // It will wait up to 90 seconds for the pods to be found and will return a list of the matching pods // If the timeout is reached, an empty list will be returned.
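DeletePods above takes the same PodLookup used throughout this package (namespace, label selector, optional container name). A hypothetical call from outside the package, clearing injector payload pods (the selector value is illustrative):

// k is a *k8s.K8s and ctx a context.Context, as in the surrounding code.
err := k.DeletePods(ctx, k8s.PodLookup{
	Namespace: "zarf",
	Selector:  "zarf-injector=payload",
})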
-func (k *K8s) WaitForPodsAndContainers(target PodLookup, include PodFilter) []corev1.Pod { - for count := 0; count < waitLimit; count++ { +func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, include PodFilter) []corev1.Pod { + waitCtx, cancel := context.WithTimeout(ctx, 90*time.Second) + defer cancel() - pods, err := k.Clientset.CoreV1().Pods(target.Namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: target.Selector, - }) - if err != nil { - k.Log("Unable to find matching pods: %w", err) - break - } + timer := time.NewTimer(0) + defer timer.Stop() - k.Log("Found %d pods for target %#v", len(pods.Items), target) + for { + select { + case <-waitCtx.Done(): + k.Log("Pod lookup failed: %v", waitCtx.Err()) + return nil + case <-timer.C: + pods, err := k.GetPods(ctx, target.Namespace, metav1.ListOptions{ + LabelSelector: target.Selector, + }) + if err != nil { + k.Log("Unable to find matching pods: %v", err) + return nil + } - var readyPods = []corev1.Pod{} + k.Log("Found %d pods for target %#v", len(pods.Items), target) - // Sort the pods from newest to oldest - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) - }) + var readyPods = []corev1.Pod{} - for _, pod := range pods.Items { - k.Log("Testing pod %q", pod.Name) + // Sort the pods from newest to oldest + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) - // If an include function is provided, only keep pods that return true - if include != nil && !include(pod) { - continue - } + for _, pod := range pods.Items { + k.Log("Testing pod %q", pod.Name) - // Handle container targeting - if target.Container != "" { - k.Log("Testing pod %q for container %q", pod.Name, target.Container) - var matchesInitContainer bool - - // Check the status of initContainers for a running match - for _, initContainer := range pod.Status.InitContainerStatuses { - isRunning := initContainer.State.Running != nil - if isRunning && initContainer.Name == target.Container { - // On running match in initContainer break this loop - matchesInitContainer = true - readyPods = append(readyPods, pod) - break - } - } - - // Don't check any further if there's already a match - if matchesInitContainer { + // If an include function is provided, only keep pods that return true + if include != nil && !include(pod) { continue } - // Check the status of regular containers for a running match - for _, container := range pod.Status.ContainerStatuses { - isRunning := container.State.Running != nil - if isRunning && container.Name == target.Container { + // Handle container targeting + if target.Container != "" { + k.Log("Testing pod %q for container %q", pod.Name, target.Container) + + // Check the status of initContainers for a running match + for _, initContainer := range pod.Status.InitContainerStatuses { + isRunning := initContainer.State.Running != nil + if initContainer.Name == target.Container && isRunning { + // On running match in initContainer break this loop + readyPods = append(readyPods, pod) + break + } + } + + // Check the status of regular containers for a running match + for _, container := range pod.Status.ContainerStatuses { + isRunning := container.State.Running != nil + if container.Name == target.Container && isRunning { + readyPods = append(readyPods, pod) + break + } + } + } else { + status := pod.Status.Phase + k.Log("Testing pod %q phase, want (%q) got (%q)",
pod.Name, corev1.PodRunning, status) + // Regular status checking without a container + if status == corev1.PodRunning { readyPods = append(readyPods, pod) + break } } - } else { - status := pod.Status.Phase - k.Log("Testing pod %q phase, want (%q) got (%q)", pod.Name, corev1.PodRunning, status) - // Regular status checking without a container - if status == corev1.PodRunning { - readyPods = append(readyPods, pod) - } } + if len(readyPods) > 0 { + return readyPods + } + timer.Reset(3 * time.Second) } - - if len(readyPods) > 0 { - return readyPods - } - - time.Sleep(3 * time.Second) } - - k.Log("Pod lookup timeout exceeded") - - return []corev1.Pod{} } // FindPodContainerPort will find a pod's container port from a service and return it. // // Returns 0 if no port is found. -func (k *K8s) FindPodContainerPort(svc corev1.Service) int { +func (k *K8s) FindPodContainerPort(ctx context.Context, svc corev1.Service) int { selectorLabelsOfPods := MakeLabels(svc.Spec.Selector) - pods := k.WaitForPodsAndContainers(PodLookup{ - Namespace: svc.Namespace, - Selector: selectorLabelsOfPods, - }, nil) + pods := k.WaitForPodsAndContainers( + ctx, + PodLookup{ + Namespace: svc.Namespace, + Selector: selectorLabelsOfPods, + }, + nil, + ) for _, pod := range pods { // Find the matching name on the port in the pod diff --git a/src/pkg/k8s/sa.go b/src/pkg/k8s/sa.go index 26e48d134d..38b7624130 100644 --- a/src/pkg/k8s/sa.go +++ b/src/pkg/k8s/sa.go @@ -15,48 +15,50 @@ import ( ) // GetAllServiceAccounts returns a list of service accounts for all namespaces. -func (k *K8s) GetAllServiceAccounts() (*corev1.ServiceAccountList, error) { - return k.GetServiceAccounts(corev1.NamespaceAll) +func (k *K8s) GetAllServiceAccounts(ctx context.Context) (*corev1.ServiceAccountList, error) { + return k.GetServiceAccounts(ctx, corev1.NamespaceAll) } // GetServiceAccounts returns a list of service accounts in a given namespace. -func (k *K8s) GetServiceAccounts(namespace string) (*corev1.ServiceAccountList, error) { +func (k *K8s) GetServiceAccounts(ctx context.Context, namespace string) (*corev1.ServiceAccountList, error) { metaOptions := metav1.ListOptions{} - return k.Clientset.CoreV1().ServiceAccounts(namespace).List(context.TODO(), metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(namespace).List(ctx, metaOptions) } // GetServiceAccount returns a single service account by namespace and name. -func (k *K8s) GetServiceAccount(namespace, name string) (*corev1.ServiceAccount, error) { +func (k *K8s) GetServiceAccount(ctx context.Context, namespace, name string) (*corev1.ServiceAccount, error) { metaOptions := metav1.GetOptions{} - return k.Clientset.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metaOptions) } // UpdateServiceAccount updates the given service account in the cluster. -func (k *K8s) UpdateServiceAccount(svcAccount *corev1.ServiceAccount) (*corev1.ServiceAccount, error) { +func (k *K8s) UpdateServiceAccount(ctx context.Context, svcAccount *corev1.ServiceAccount) (*corev1.ServiceAccount, error) { metaOptions := metav1.UpdateOptions{} - return k.Clientset.CoreV1().ServiceAccounts(svcAccount.Namespace).Update(context.TODO(), svcAccount, metaOptions) + return k.Clientset.CoreV1().ServiceAccounts(svcAccount.Namespace).Update(ctx, svcAccount, metaOptions) } // WaitForServiceAccount waits for a service account to be created in the cluster.
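The include PodFilter parameter above lets callers narrow matches beyond the label selector; from its call site it is a func(corev1.Pod) bool. A sketch of a filter that keeps only pods reporting Ready (illustrative; not a filter Zarf itself ships):

onlyReady := func(pod corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

pods := k.WaitForPodsAndContainers(ctx, k8s.PodLookup{
	Namespace: "zarf",
	Selector:  "app=zarf-registry", // illustrative selector
}, onlyReady)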
-func (k *K8s) WaitForServiceAccount(ns, name string, timeout time.Duration) (*corev1.ServiceAccount, error) { - expired := time.After(timeout) +func (k *K8s) WaitForServiceAccount(ctx context.Context, ns, name string) (*corev1.ServiceAccount, error) { + timer := time.NewTimer(0) + defer timer.Stop() for { select { - case <-expired: - return nil, fmt.Errorf("timed out waiting for service account %s/%s to exist", ns, name) - - default: - sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - time.Sleep(1 * time.Second) - continue - } + case <-ctx.Done(): + return nil, fmt.Errorf("failed to get service account %s/%s: %w", ns, name, ctx.Err()) + case <-timer.C: + sa, err := k.Clientset.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + + if errors.IsNotFound(err) { + k.Log("Service account %s/%s not found, retrying...", ns, name) + } else { return nil, fmt.Errorf("error getting service account %s/%s: %w", ns, name, err) } - return sa, nil + timer.Reset(1 * time.Second) } } } diff --git a/src/pkg/k8s/secrets.go b/src/pkg/k8s/secrets.go index f92882b8d0..d391b97771 100644 --- a/src/pkg/k8s/secrets.go +++ b/src/pkg/k8s/secrets.go @@ -15,14 +15,14 @@ import ( ) // GetSecret returns a Kubernetes secret. -func (k *K8s) GetSecret(namespace, name string) (*corev1.Secret, error) { - return k.Clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func (k *K8s) GetSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) { + return k.Clientset.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) } // GetSecretsWithLabel returns a list of Kubernetes secrets with the given label. -func (k *K8s) GetSecretsWithLabel(namespace, labelSelector string) (*corev1.SecretList, error) { +func (k *K8s) GetSecretsWithLabel(ctx context.Context, namespace, labelSelector string) (*corev1.SecretList, error) { listOptions := metav1.ListOptions{LabelSelector: labelSelector} - return k.Clientset.CoreV1().Secrets(namespace).List(context.TODO(), listOptions) + return k.Clientset.CoreV1().Secrets(namespace).List(ctx, listOptions) } // GenerateSecret returns a Kubernetes secret object without applying it to the cluster. @@ -58,20 +58,20 @@ func (k *K8s) GenerateTLSSecret(namespace, name string, conf GeneratedPKI) (*cor } // CreateOrUpdateTLSSecret creates or updates a Kubernetes secret with a new TLS secret. -func (k *K8s) CreateOrUpdateTLSSecret(namespace, name string, conf GeneratedPKI) (*corev1.Secret, error) { +func (k *K8s) CreateOrUpdateTLSSecret(ctx context.Context, namespace, name string, conf GeneratedPKI) (*corev1.Secret, error) { secret, err := k.GenerateTLSSecret(namespace, name, conf) if err != nil { return secret, err } - return k.CreateOrUpdateSecret(secret) + return k.CreateOrUpdateSecret(ctx, secret) } // DeleteSecret deletes a Kubernetes secret. 
-func (k *K8s) DeleteSecret(secret *corev1.Secret) error { +func (k *K8s) DeleteSecret(ctx context.Context, secret *corev1.Secret) error { namespaceSecrets := k.Clientset.CoreV1().Secrets(secret.Namespace) - err := namespaceSecrets.Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) + err := namespaceSecrets.Delete(ctx, secret.Name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("error deleting the secret: %w", err) } @@ -80,18 +80,18 @@ func (k *K8s) DeleteSecret(secret *corev1.Secret) error { } // CreateOrUpdateSecret creates or updates a Kubernetes secret. -func (k *K8s) CreateOrUpdateSecret(secret *corev1.Secret) (createdSecret *corev1.Secret, err error) { +func (k *K8s) CreateOrUpdateSecret(ctx context.Context, secret *corev1.Secret) (createdSecret *corev1.Secret, err error) { namespaceSecrets := k.Clientset.CoreV1().Secrets(secret.Namespace) - if _, err = k.GetSecret(secret.Namespace, secret.Name); err != nil { + if _, err = k.GetSecret(ctx, secret.Namespace, secret.Name); err != nil { // create the given secret - if createdSecret, err = namespaceSecrets.Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if createdSecret, err = namespaceSecrets.Create(ctx, secret, metav1.CreateOptions{}); err != nil { return createdSecret, fmt.Errorf("unable to create the secret: %w", err) } } else { // update the given secret - if createdSecret, err = namespaceSecrets.Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { + if createdSecret, err = namespaceSecrets.Update(ctx, secret, metav1.UpdateOptions{}); err != nil { return createdSecret, fmt.Errorf("unable to update the secret: %w", err) } } diff --git a/src/pkg/k8s/services.go b/src/pkg/k8s/services.go index 725fc2788b..63b847d413 100644 --- a/src/pkg/k8s/services.go +++ b/src/pkg/k8s/services.go @@ -27,12 +27,12 @@ type ServiceInfo struct { } // ReplaceService deletes and re-creates a service. -func (k *K8s) ReplaceService(service *corev1.Service) (*corev1.Service, error) { - if err := k.DeleteService(service.Namespace, service.Name); err != nil { +func (k *K8s) ReplaceService(ctx context.Context, service *corev1.Service) (*corev1.Service, error) { + if err := k.DeleteService(ctx, service.Namespace, service.Name); err != nil { return nil, err } - return k.CreateService(service) + return k.CreateService(ctx, service) } // GenerateService returns a K8s service struct without writing to the cluster. @@ -54,28 +54,28 @@ func (k *K8s) GenerateService(namespace, name string) *corev1.Service { } // DeleteService removes a service from the cluster by namespace and name. -func (k *K8s) DeleteService(namespace, name string) error { - return k.Clientset.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) +func (k *K8s) DeleteService(ctx context.Context, namespace, name string) error { + return k.Clientset.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{}) } // CreateService creates the given service in the cluster. 
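One caveat in CreateOrUpdateSecret above: any Get error, not just NotFound, routes to the create path, which can mask failures such as RBAC denials. A stricter variant would branch on the error type (a sketch of an alternative, not what this diff implements):

import (
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
)

func upsertSecret(ctx context.Context, cs kubernetes.Interface, secret *corev1.Secret) (*corev1.Secret, error) {
	secrets := cs.CoreV1().Secrets(secret.Namespace)
	_, err := secrets.Get(ctx, secret.Name, metav1.GetOptions{})
	switch {
	case kerrors.IsNotFound(err):
		return secrets.Create(ctx, secret, metav1.CreateOptions{})
	case err != nil:
		return nil, err // e.g. forbidden: surface it instead of blindly creating
	default:
		return secrets.Update(ctx, secret, metav1.UpdateOptions{})
	}
}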
-func (k *K8s) CreateService(service *corev1.Service) (*corev1.Service, error) { +func (k *K8s) CreateService(ctx context.Context, service *corev1.Service) (*corev1.Service, error) { createOptions := metav1.CreateOptions{} - return k.Clientset.CoreV1().Services(service.Namespace).Create(context.TODO(), service, createOptions) + return k.Clientset.CoreV1().Services(service.Namespace).Create(ctx, service, createOptions) } // GetService returns a Kubernetes service resource in the provided namespace with the given name. -func (k *K8s) GetService(namespace, serviceName string) (*corev1.Service, error) { - return k.Clientset.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) +func (k *K8s) GetService(ctx context.Context, namespace, serviceName string) (*corev1.Service, error) { + return k.Clientset.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) } // GetServices returns a list of services in the provided namespace. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServices(namespace string) (*corev1.ServiceList, error) { - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) +func (k *K8s) GetServices(ctx context.Context, namespace string) (*corev1.ServiceList, error) { + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{}) } // GetServicesByLabel returns a list of matched services given a label and value. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServicesByLabel(namespace, label, value string) (*corev1.ServiceList, error) { +func (k *K8s) GetServicesByLabel(ctx context.Context, namespace, label, value string) (*corev1.ServiceList, error) { // Create the selector and add the requirement labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchLabels: Labels{ @@ -84,11 +84,11 @@ func (k *K8s) GetServicesByLabel(namespace, label, value string) (*corev1.Servic }) // Run the query with the selector and return as a ServiceList - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) } // GetServicesByLabelExists returns a list of matched services given a label. To search all namespaces, pass "" in the namespace arg. -func (k *K8s) GetServicesByLabelExists(namespace, label string) (*corev1.ServiceList, error) { +func (k *K8s) GetServicesByLabelExists(ctx context.Context, namespace, label string) (*corev1.ServiceList, error) { // Create the selector and add the requirement labelSelector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -98,12 +98,12 @@ func (k *K8s) GetServicesByLabelExists(namespace, label string) (*corev1.Service }) // Run the query with the selector and return as a ServiceList - return k.Clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + return k.Clientset.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) } // ServiceInfoFromNodePortURL takes a nodePortURL and parses it to find the service info for connecting to the cluster. The string is expected to follow the following format: // Example nodePortURL: 127.0.0.1:{PORT}. 
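ServiceInfoFromNodePortURL below lists every service in the cluster to match the node port, which can be slow on large clusters, so inheriting cancellation from the caller matters here. A usage sketch; the URL, timeout, and surrounding function are illustrative:

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/defenseunicorns/zarf/src/pkg/k8s"
    )

    func lookupNodePort(k *k8s.K8s) error {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // 31999 sits inside the NodePort range (30000-32767) the function validates.
        info, err := k.ServiceInfoFromNodePortURL(ctx, "127.0.0.1:31999")
        if err != nil {
            return err
        }
        fmt.Printf("matched service: %+v\n", info)
        return nil
    }
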
-func (k *K8s) ServiceInfoFromNodePortURL(nodePortURL string) (*ServiceInfo, error) {
+func (k *K8s) ServiceInfoFromNodePortURL(ctx context.Context, nodePortURL string) (*ServiceInfo, error) {
 	// Attempt to parse as normal, if this fails add a scheme to the URL (docker registries don't use schemes)
 	parsedURL, err := url.Parse(nodePortURL)
 	if err != nil {
@@ -128,7 +128,7 @@ func (k *K8s) ServiceInfoFromNodePortURL(nodePortURL string) (*ServiceInfo, erro
 		return nil, fmt.Errorf("node port services should use the port range 30000-32767")
 	}
 
-	services, err := k.GetServices("")
+	services, err := k.GetServices(ctx, "")
 	if err != nil {
 		return nil, err
 	}
diff --git a/src/pkg/k8s/tunnel.go b/src/pkg/k8s/tunnel.go
index a4b910fccf..6c4f46e0cf 100644
--- a/src/pkg/k8s/tunnel.go
+++ b/src/pkg/k8s/tunnel.go
@@ -7,6 +7,7 @@ package k8s
 // Forked from https://github.com/gruntwork-io/terratest/blob/v0.38.8/modules/k8s/tunnel.go
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"net/http"
@@ -79,24 +80,34 @@ func (tunnel *Tunnel) Wrap(function func() error) error {
 }
 
 // Connect will establish a tunnel to the specified target.
-func (tunnel *Tunnel) Connect() (string, error) {
-	url, err := tunnel.establish()
+func (tunnel *Tunnel) Connect(ctx context.Context) (string, error) {
+	url, err := tunnel.establish(ctx)
 
 	// Try to establish the tunnel up to 3 times.
 	if err != nil {
 		tunnel.attempt++
+
 		// If we have exceeded the number of attempts, exit with an error.
 		if tunnel.attempt > 3 {
 			return "", fmt.Errorf("unable to establish tunnel after 3 attempts: %w", err)
 		}
+
 		// Otherwise, retry the connection but delay increasing intervals between attempts.
 		delay := tunnel.attempt * 10
 		tunnel.kube.Log("%s", err.Error())
 		tunnel.kube.Log("Delay creating tunnel, waiting %d seconds...", delay)
-		time.Sleep(time.Duration(delay) * time.Second)
-		url, err = tunnel.Connect()
-		if err != nil {
-			return "", err
+
+		timer := time.NewTimer(time.Duration(delay) * time.Second)
+		defer timer.Stop()
+
+		select {
+		case <-ctx.Done():
+			return "", ctx.Err()
+		case <-timer.C:
+			url, err = tunnel.Connect(ctx)
+			if err != nil {
+				return "", err
+			}
 		}
 	}
 
@@ -129,7 +140,7 @@ func (tunnel *Tunnel) Close() {
 }
 
 // establish opens a tunnel to a kubernetes resource, as specified by the provided tunnel struct.
-func (tunnel *Tunnel) establish() (string, error) {
+func (tunnel *Tunnel) establish(ctx context.Context) (string, error) {
 	var err error
 
 	// Track this locally as we may need to retry if the tunnel fails.
@@ -163,7 +174,7 @@ func (tunnel *Tunnel) establish() (string, error) {
 	tunnel.kube.Log(message)
 
 	// Find the pod to port forward to
-	podName, err := tunnel.getAttachablePodForResource()
+	podName, err := tunnel.getAttachablePodForResource(ctx)
 	if err != nil {
 		return "", fmt.Errorf("unable to find pod attached to given resource: %w", err)
 	}
@@ -222,29 +233,33 @@ 

 // getAttachablePodForResource will find a pod that can be port forwarded to the provided resource type and return
 // the name.
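The retry in Connect above replaces time.Sleep with a timer inside a select, the standard way to make a backoff wait respond to cancellation. Isolated as a standalone helper the pattern reads as follows; sleepCtx is a sketch, not part of the Zarf API:

    package example

    import (
        "context"
        "time"
    )

    // sleepCtx behaves like time.Sleep(d) but returns early if ctx is cancelled.
    func sleepCtx(ctx context.Context, d time.Duration) error {
        timer := time.NewTimer(d)
        defer timer.Stop()
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-timer.C:
            return nil
        }
    }
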
-func (tunnel *Tunnel) getAttachablePodForResource() (string, error) { +func (tunnel *Tunnel) getAttachablePodForResource(ctx context.Context) (string, error) { switch tunnel.resourceType { case PodResource: return tunnel.resourceName, nil case SvcResource: - return tunnel.getAttachablePodForService() + return tunnel.getAttachablePodForService(ctx) default: return "", fmt.Errorf("unknown resource type: %s", tunnel.resourceType) } } // getAttachablePodForService will find an active pod associated with the Service and return the pod name. -func (tunnel *Tunnel) getAttachablePodForService() (string, error) { - service, err := tunnel.kube.GetService(tunnel.namespace, tunnel.resourceName) +func (tunnel *Tunnel) getAttachablePodForService(ctx context.Context) (string, error) { + service, err := tunnel.kube.GetService(ctx, tunnel.namespace, tunnel.resourceName) if err != nil { return "", fmt.Errorf("unable to find the service: %w", err) } selectorLabelsOfPods := MakeLabels(service.Spec.Selector) - servicePods := tunnel.kube.WaitForPodsAndContainers(PodLookup{ - Namespace: tunnel.namespace, - Selector: selectorLabelsOfPods, - }, nil) + servicePods := tunnel.kube.WaitForPodsAndContainers( + ctx, + PodLookup{ + Namespace: tunnel.namespace, + Selector: selectorLabelsOfPods, + }, + nil, + ) if len(servicePods) < 1 { return "", fmt.Errorf("no pods found for service %s", tunnel.resourceName) diff --git a/src/pkg/packager/common.go b/src/pkg/packager/common.go index 53b918bde2..f432b8d423 100644 --- a/src/pkg/packager/common.go +++ b/src/pkg/packager/common.go @@ -5,11 +5,11 @@ package packager import ( + "context" "errors" "fmt" "os" "strings" - "time" "slices" @@ -159,17 +159,17 @@ func (p *Packager) GetVariableConfig() *variables.VariableConfig { } // connectToCluster attempts to connect to a cluster if a connection is not already established -func (p *Packager) connectToCluster(timeout time.Duration) (err error) { +func (p *Packager) connectToCluster(ctx context.Context) (err error) { if p.isConnectedToCluster() { return nil } - p.cluster, err = cluster.NewClusterWithWait(timeout) + p.cluster, err = cluster.NewClusterWithWait(ctx) if err != nil { return err } - return p.attemptClusterChecks() + return p.attemptClusterChecks(ctx) } // isConnectedToCluster returns whether the current packager instance is connected to a cluster @@ -189,19 +189,19 @@ func (p *Packager) hasImages() bool { // attemptClusterChecks attempts to connect to the cluster and check for useful metadata and config mismatches. // NOTE: attemptClusterChecks should only return an error if there is a problem significant enough to halt a deployment, otherwise it should return nil and print a warning message. 
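connectToCluster below no longer accepts a time.Duration; callers now own the wait budget by deriving a child context, as deployComponents does later in this patch. The generic shape of that call-site pattern, as a sketch (withDeadline is an illustrative helper, not part of the codebase):

    package example

    import (
        "context"
        "time"
    )

    // withDeadline runs op under a child context that expires after d.
    func withDeadline(ctx context.Context, d time.Duration, op func(context.Context) error) error {
        opCtx, cancel := context.WithTimeout(ctx, d)
        defer cancel()
        return op(opCtx)
    }

With a *Packager in scope this could be invoked as withDeadline(ctx, 5*time.Minute, p.connectToCluster), since the method value already has the required signature.
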
-func (p *Packager) attemptClusterChecks() (err error) { +func (p *Packager) attemptClusterChecks(ctx context.Context) (err error) { spinner := message.NewProgressSpinner("Gathering additional cluster information (if available)") defer spinner.Stop() // Check if the package has already been deployed and get its generation - if existingDeployedPackage, _ := p.cluster.GetDeployedPackage(p.cfg.Pkg.Metadata.Name); existingDeployedPackage != nil { + if existingDeployedPackage, _ := p.cluster.GetDeployedPackage(ctx, p.cfg.Pkg.Metadata.Name); existingDeployedPackage != nil { // If this package has been deployed before, increment the package generation within the secret p.generation = existingDeployedPackage.Generation + 1 } // Check the clusters architecture matches the package spec - if err := p.validatePackageArchitecture(); err != nil { + if err := p.validatePackageArchitecture(ctx); err != nil { if errors.Is(err, lang.ErrUnableToCheckArch) { message.Warnf("Unable to validate package architecture: %s", err.Error()) } else { @@ -210,7 +210,7 @@ func (p *Packager) attemptClusterChecks() (err error) { } // Check for any breaking changes between the initialized Zarf version and this CLI - if existingInitPackage, _ := p.cluster.GetDeployedPackage("init"); existingInitPackage != nil { + if existingInitPackage, _ := p.cluster.GetDeployedPackage(ctx, "init"); existingInitPackage != nil { // Use the build version instead of the metadata since this will support older Zarf versions deprecated.PrintBreakingChanges(existingInitPackage.Data.Build.Version) } @@ -221,13 +221,13 @@ func (p *Packager) attemptClusterChecks() (err error) { } // validatePackageArchitecture validates that the package architecture matches the target cluster architecture. -func (p *Packager) validatePackageArchitecture() error { +func (p *Packager) validatePackageArchitecture(ctx context.Context) error { // Ignore this check if we don't have a cluster connection, or the package contains no images if !p.isConnectedToCluster() || !p.hasImages() { return nil } - clusterArchitectures, err := p.cluster.GetArchitectures() + clusterArchitectures, err := p.cluster.GetArchitectures(ctx) if err != nil { return lang.ErrUnableToCheckArch } diff --git a/src/pkg/packager/common_test.go b/src/pkg/packager/common_test.go index f2d085a1bd..3ead7f9b4b 100644 --- a/src/pkg/packager/common_test.go +++ b/src/pkg/packager/common_test.go @@ -4,6 +4,7 @@ package packager import ( + "context" "errors" "fmt" "testing" @@ -132,7 +133,7 @@ func TestValidatePackageArchitecture(t *testing.T) { return true, nodeList, nil }) - err := p.validatePackageArchitecture() + err := p.validatePackageArchitecture(context.TODO()) require.Equal(t, testCase.expectedError, err) }) diff --git a/src/pkg/packager/deploy.go b/src/pkg/packager/deploy.go index a595888414..039a63cb61 100644 --- a/src/pkg/packager/deploy.go +++ b/src/pkg/packager/deploy.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "os" "path/filepath" @@ -32,16 +33,16 @@ import ( corev1 "k8s.io/api/core/v1" ) -func (p *Packager) resetRegistryHPA() { +func (p *Packager) resetRegistryHPA(ctx context.Context) { if p.isConnectedToCluster() && p.hpaModified { - if err := p.cluster.EnableRegHPAScaleDown(); err != nil { + if err := p.cluster.EnableRegHPAScaleDown(ctx); err != nil { message.Debugf("unable to reenable the registry HPA scale down: %s", err.Error()) } } } // Deploy attempts to deploy the given PackageConfig. 
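One subtlety in Deploy below: defer p.resetRegistryHPA(ctx) captures the deploy context, so if the deployment aborted because that context was cancelled, the HPA cleanup call runs with an already-cancelled context and can fail for the same reason. Where cleanup must survive cancellation, Go 1.21's context.WithoutCancel can give it a fresh budget; a sketch under that assumption (work and cleanup are illustrative):

    package example

    import (
        "context"
        "time"
    )

    func workThenCleanup(ctx context.Context, work, cleanup func(context.Context) error) error {
        defer func() {
            // Keep ctx's values but drop its cancellation, then re-bound the cleanup.
            cleanupCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 30*time.Second)
            defer cancel()
            _ = cleanup(cleanupCtx)
        }()
        return work(ctx)
    }
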
-func (p *Packager) Deploy() (err error) { +func (p *Packager) Deploy(ctx context.Context) (err error) { isInteractive := !config.CommonOptions.Confirm @@ -100,10 +101,10 @@ func (p *Packager) Deploy() (err error) { p.hpaModified = false p.connectStrings = make(types.ConnectStrings) // Reset registry HPA scale down whether an error occurs or not - defer p.resetRegistryHPA() + defer p.resetRegistryHPA(ctx) // Get a list of all the components we are deploying and actually deploy them - deployedComponents, err := p.deployComponents() + deployedComponents, err := p.deployComponents(ctx) if err != nil { return err } @@ -114,14 +115,13 @@ func (p *Packager) Deploy() (err error) { // Notify all the things about the successful deployment message.Successf("Zarf deployment complete") - p.printTablesForDeployment(deployedComponents) + p.printTablesForDeployment(ctx, deployedComponents) return nil } // deployComponents loops through a list of ZarfComponents and deploys them. -func (p *Packager) deployComponents() (deployedComponents []types.DeployedComponent, err error) { - +func (p *Packager) deployComponents(ctx context.Context) (deployedComponents []types.DeployedComponent, err error) { // Check if this package has been deployed before and grab relevant information about already deployed components if p.generation == 0 { p.generation = 1 // If this is the first deployment, set the generation to 1 @@ -142,15 +142,16 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon if p.cfg.Pkg.IsInitConfig() { timeout = 5 * time.Minute } - - if err := p.connectToCluster(timeout); err != nil { + connectCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + if err := p.connectToCluster(connectCtx); err != nil { return deployedComponents, fmt.Errorf("unable to connect to the Kubernetes cluster: %w", err) } } // Ensure we don't overwrite any installedCharts data when updating the package secret if p.isConnectedToCluster() { - deployedComponent.InstalledCharts, err = p.cluster.GetInstalledChartsForComponent(p.cfg.Pkg.Metadata.Name, component) + deployedComponent.InstalledCharts, err = p.cluster.GetInstalledChartsForComponent(ctx, p.cfg.Pkg.Metadata.Name, component) if err != nil { message.Debugf("Unable to fetch installed Helm charts for component '%s': %s", component.Name, err.Error()) } @@ -161,7 +162,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon // Update the package secret to indicate that we are attempting to deploy this component if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %s: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -170,9 +171,9 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon var charts []types.InstalledChart var deployErr error if p.cfg.Pkg.IsInitConfig() { - charts, deployErr = p.deployInitComponent(component) + charts, deployErr = p.deployInitComponent(ctx, component) } else { - charts, deployErr = p.deployComponent(component, false /* keep img checksum */, false /* always push images */) + charts, deployErr = p.deployComponent(ctx, 
component, false /* keep img checksum */, false /* always push images */) } onDeploy := component.Actions.OnDeploy @@ -189,7 +190,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon // Update the package secret to indicate that we failed to deploy this component deployedComponents[idx].Status = types.ComponentStatusFailed if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -201,7 +202,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon deployedComponents[idx].InstalledCharts = charts deployedComponents[idx].Status = types.ComponentStatusSucceeded if p.isConnectedToCluster() { - if _, err := p.cluster.RecordPackageDeploymentAndWait(p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { + if _, err := p.cluster.RecordPackageDeploymentAndWait(ctx, p.cfg.Pkg, deployedComponents, p.connectStrings, p.generation, component, p.cfg.DeployOpts.SkipWebhooks); err != nil { message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error()) } } @@ -215,7 +216,7 @@ func (p *Packager) deployComponents() (deployedComponents []types.DeployedCompon return deployedComponents, nil } -func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts []types.InstalledChart, err error) { +func (p *Packager) deployInitComponent(ctx context.Context, component types.ZarfComponent) (charts []types.InstalledChart, err error) { hasExternalRegistry := p.cfg.InitOpts.RegistryInfo.Address != "" isSeedRegistry := component.Name == "zarf-seed-registry" isRegistry := component.Name == "zarf-registry" @@ -229,7 +230,7 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] // Always init the state before the first component that requires the cluster (on most deployments, the zarf-seed-registry) if component.RequiresCluster() && p.state == nil { - err = p.cluster.InitZarfState(p.cfg.InitOpts) + err = p.cluster.InitZarfState(ctx, p.cfg.InitOpts) if err != nil { return charts, fmt.Errorf("unable to initialize Zarf state: %w", err) } @@ -247,17 +248,17 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] // Before deploying the seed registry, start the injector if isSeedRegistry { - p.cluster.StartInjectionMadness(p.layout.Base, p.layout.Images.Base, component.Images) + p.cluster.StartInjectionMadness(ctx, p.layout.Base, p.layout.Images.Base, component.Images) } - charts, err = p.deployComponent(component, isAgent /* skip img checksum if isAgent */, isSeedRegistry /* skip image push if isSeedRegistry */) + charts, err = p.deployComponent(ctx, component, isAgent /* skip img checksum if isAgent */, isSeedRegistry /* skip image push if isSeedRegistry */) if err != nil { return charts, err } // Do cleanup for when we inject the seed registry during initialization if isSeedRegistry { - if err := p.cluster.StopInjectionMadness(); err != nil { + if err := 
p.cluster.StopInjectionMadness(ctx); err != nil { return charts, fmt.Errorf("unable to seed the Zarf Registry: %w", err) } } @@ -266,7 +267,7 @@ func (p *Packager) deployInitComponent(component types.ZarfComponent) (charts [] } // Deploy a Zarf Component. -func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum bool, noImgPush bool) (charts []types.InstalledChart, err error) { +func (p *Packager) deployComponent(ctx context.Context, component types.ZarfComponent, noImgChecksum bool, noImgPush bool) (charts []types.InstalledChart, err error) { // Toggles for general deploy operations componentPath := p.layout.Components.Dirs[component.Name] @@ -285,7 +286,7 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum if component.RequiresCluster() { // Setup the state in the config if p.state == nil { - err = p.setupState() + err = p.setupState(ctx) if err != nil { return charts, err } @@ -293,7 +294,7 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum // Disable the registry HPA scale down if we are deploying images and it is not already disabled if hasImages && !p.hpaModified && p.state.RegistryInfo.InternalRegistry { - if err := p.cluster.DisableRegHPAScaleDown(); err != nil { + if err := p.cluster.DisableRegHPAScaleDown(ctx); err != nil { message.Debugf("unable to disable the registry HPA scale down: %s", err.Error()) } else { p.hpaModified = true @@ -317,13 +318,13 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum } if hasImages { - if err := p.pushImagesToRegistry(component.Images, noImgChecksum); err != nil { + if err := p.pushImagesToRegistry(ctx, component.Images, noImgChecksum); err != nil { return charts, fmt.Errorf("unable to push images to the registry: %w", err) } } if hasRepos { - if err = p.pushReposToRepository(componentPath.Repos, component.Repos); err != nil { + if err = p.pushReposToRepository(ctx, componentPath.Repos, component.Repos); err != nil { return charts, fmt.Errorf("unable to push the repos to the repository: %w", err) } } @@ -334,7 +335,7 @@ func (p *Packager) deployComponent(component types.ZarfComponent, noImgChecksum for idx, data := range component.DataInjections { waitGroup.Add(1) - go p.cluster.HandleDataInjection(&waitGroup, data, componentPath, idx) + go p.cluster.HandleDataInjection(ctx, &waitGroup, data, componentPath, idx) } } @@ -431,12 +432,12 @@ func (p *Packager) processComponentFiles(component types.ZarfComponent, pkgLocat } // setupState fetches the current ZarfState from the k8s cluster and sets the packager to use it -func (p *Packager) setupState() (err error) { +func (p *Packager) setupState(ctx context.Context) (err error) { // If we are touching K8s, make sure we can talk to it once per deployment spinner := message.NewProgressSpinner("Loading the Zarf State from the Kubernetes cluster") defer spinner.Stop() - state, err := p.cluster.LoadZarfState() + state, err := p.cluster.LoadZarfState(ctx) // Return on error if we are not in YOLO mode if err != nil && !p.cfg.Pkg.Metadata.YOLO { return fmt.Errorf("%s %w", lang.ErrLoadState, err) @@ -448,7 +449,7 @@ func (p *Packager) setupState() (err error) { // Try to create the zarf namespace spinner.Updatef("Creating the Zarf namespace") zarfNamespace := p.cluster.NewZarfManagedNamespace(cluster.ZarfNamespaceName) - if _, err := p.cluster.CreateNamespace(zarfNamespace); err != nil { + if _, err := p.cluster.CreateNamespace(ctx, zarfNamespace); err != nil { spinner.Fatalf(err, "Unable to 
create the zarf namespace") } } @@ -480,7 +481,7 @@ func (p *Packager) populatePackageVariableConfig() error { } // Push all of the components images to the configured container registry. -func (p *Packager) pushImagesToRegistry(componentImages []string, noImgChecksum bool) error { +func (p *Packager) pushImagesToRegistry(ctx context.Context, componentImages []string, noImgChecksum bool) error { var combinedImageList []transform.Image for _, src := range componentImages { ref, err := transform.ParseImageRef(src) @@ -501,11 +502,11 @@ func (p *Packager) pushImagesToRegistry(componentImages []string, noImgChecksum Retries: p.cfg.PkgOpts.Retries, } - return images.Push(pushCfg) + return images.Push(ctx, pushCfg) } // Push all of the components git repos to the configured git server. -func (p *Packager) pushReposToRepository(reposPath string, repos []string) error { +func (p *Packager) pushReposToRepository(ctx context.Context, reposPath string, repos []string) error { for _, repoURL := range repos { // Create an anonymous function to push the repo to the Zarf git server tryPush := func() error { @@ -518,7 +519,9 @@ func (p *Packager) pushReposToRepository(reposPath string, repos []string) error // If this is a service (svcInfo is not nil), create a port-forward tunnel to that resource if svcInfo != nil { if !p.isConnectedToCluster() { - err := p.connectToCluster(5 * time.Second) + connectCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + err := p.connectToCluster(connectCtx) if err != nil { return err } @@ -529,7 +532,7 @@ func (p *Packager) pushReposToRepository(reposPath string, repos []string) error return err } - _, err = tunnel.Connect() + _, err = tunnel.Connect(ctx) if err != nil { return err } @@ -684,7 +687,7 @@ func (p *Packager) installChartAndManifests(componentPaths *layout.ComponentPath return installedCharts, nil } -func (p *Packager) printTablesForDeployment(componentsToDeploy []types.DeployedComponent) { +func (p *Packager) printTablesForDeployment(ctx context.Context, componentsToDeploy []types.DeployedComponent) { // If not init config, print the application connection table if !p.cfg.Pkg.IsInitConfig() { @@ -692,7 +695,7 @@ func (p *Packager) printTablesForDeployment(componentsToDeploy []types.DeployedC } else { if p.cluster != nil { // Grab a fresh copy of the state (if we are able) to print the most up-to-date version of the creds - freshState, err := p.cluster.LoadZarfState() + freshState, err := p.cluster.LoadZarfState(ctx) if err != nil { freshState = p.state } diff --git a/src/pkg/packager/dev.go b/src/pkg/packager/dev.go index 6f7a685e95..187f74c70c 100644 --- a/src/pkg/packager/dev.go +++ b/src/pkg/packager/dev.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "os" "runtime" @@ -19,7 +20,7 @@ import ( ) // DevDeploy creates + deploys a package in one shot -func (p *Packager) DevDeploy() error { +func (p *Packager) DevDeploy(ctx context.Context) error { config.CommonOptions.Confirm = true p.cfg.CreateOpts.SkipSBOM = !p.cfg.CreateOpts.NoYOLO @@ -81,11 +82,11 @@ func (p *Packager) DevDeploy() error { } else { p.hpaModified = false // Reset registry HPA scale down whether an error occurs or not - defer p.resetRegistryHPA() + defer p.resetRegistryHPA(ctx) } // Get a list of all the components we are deploying and actually deploy them - deployedComponents, err := p.deployComponents() + deployedComponents, err := p.deployComponents(ctx) if err != nil { return err } diff --git a/src/pkg/packager/mirror.go b/src/pkg/packager/mirror.go 
index 0afec2e480..658967560d 100644 --- a/src/pkg/packager/mirror.go +++ b/src/pkg/packager/mirror.go @@ -5,6 +5,7 @@ package packager import ( + "context" "fmt" "runtime" "strings" @@ -16,7 +17,7 @@ import ( ) // Mirror pulls resources from a package (images, git repositories, etc) and pushes them to remotes in the air gap without deploying them -func (p *Packager) Mirror() (err error) { +func (p *Packager) Mirror(ctx context.Context) (err error) { filter := filters.Combine( filters.ByLocalOS(runtime.GOOS), filters.BySelectState(p.cfg.PkgOpts.OptionalComponents), @@ -46,7 +47,7 @@ func (p *Packager) Mirror() (err error) { } for _, component := range p.cfg.Pkg.Components { - if err := p.mirrorComponent(component); err != nil { + if err := p.mirrorComponent(ctx, component); err != nil { return err } } @@ -54,7 +55,7 @@ func (p *Packager) Mirror() (err error) { } // mirrorComponent mirrors a Zarf Component. -func (p *Packager) mirrorComponent(component types.ZarfComponent) error { +func (p *Packager) mirrorComponent(ctx context.Context, component types.ZarfComponent) error { componentPaths := p.layout.Components.Dirs[component.Name] // All components now require a name @@ -64,13 +65,13 @@ func (p *Packager) mirrorComponent(component types.ZarfComponent) error { hasRepos := len(component.Repos) > 0 if hasImages { - if err := p.pushImagesToRegistry(component.Images, p.cfg.MirrorOpts.NoImgChecksum); err != nil { + if err := p.pushImagesToRegistry(ctx, component.Images, p.cfg.MirrorOpts.NoImgChecksum); err != nil { return fmt.Errorf("unable to push images to the registry: %w", err) } } if hasRepos { - if err := p.pushReposToRepository(componentPaths.Repos, component.Repos); err != nil { + if err := p.pushReposToRepository(ctx, componentPaths.Repos, component.Repos); err != nil { return fmt.Errorf("unable to push the repos to the repository: %w", err) } } diff --git a/src/pkg/packager/remove.go b/src/pkg/packager/remove.go index c0329ce55d..a107147714 100644 --- a/src/pkg/packager/remove.go +++ b/src/pkg/packager/remove.go @@ -5,6 +5,7 @@ package packager import ( + "context" "encoding/json" "errors" "fmt" @@ -26,7 +27,7 @@ import ( ) // Remove removes a package that was already deployed onto a cluster, uninstalling all installed helm charts. 
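With Mirror and Remove now passing the context all the way down to registry and git pushes, an interrupt can abort work mid-flight instead of being ignored. A hypothetical caller wiring that up with the standard library's signal.NotifyContext (runMirror and mirrorFn stand in for the actual cobra command plumbing):

    package example

    import (
        "context"
        "os"
        "os/signal"
        "syscall"
    )

    func runMirror(mirrorFn func(context.Context) error) error {
        // Ctrl-C or SIGTERM cancels the context, stopping in-flight pushes.
        ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
        defer stop()
        return mirrorFn(ctx)
    }
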
-func (p *Packager) Remove() (err error) { +func (p *Packager) Remove(ctx context.Context) (err error) { _, isClusterSource := p.source.(*sources.ClusterSource) if isClusterSource { p.cluster = p.source.(*sources.ClusterSource).Cluster @@ -70,11 +71,13 @@ func (p *Packager) Remove() (err error) { deployedPackage := &types.DeployedPackage{} if packageRequiresCluster { - err = p.connectToCluster(cluster.DefaultTimeout) + connectCtx, cancel := context.WithTimeout(ctx, cluster.DefaultTimeout) + defer cancel() + err = p.connectToCluster(connectCtx) if err != nil { return err } - deployedPackage, err = p.cluster.GetDeployedPackage(packageName) + deployedPackage, err = p.cluster.GetDeployedPackage(ctx, packageName) if err != nil { return fmt.Errorf("unable to load the secret for the package we are attempting to remove: %s", err.Error()) } @@ -93,7 +96,7 @@ func (p *Packager) Remove() (err error) { continue } - if deployedPackage, err = p.removeComponent(deployedPackage, dc, spinner); err != nil { + if deployedPackage, err = p.removeComponent(ctx, deployedPackage, dc, spinner); err != nil { return fmt.Errorf("unable to remove the component '%s': %w", dc.Name, err) } } @@ -101,7 +104,7 @@ func (p *Packager) Remove() (err error) { return nil } -func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { +func (p *Packager) updatePackageSecret(ctx context.Context, deployedPackage types.DeployedPackage) { // Only attempt to update the package secret if we are actually connected to a cluster if p.cluster != nil { secretName := config.ZarfPackagePrefix + deployedPackage.Name @@ -113,7 +116,7 @@ func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { newPackageSecretData, _ := json.Marshal(deployedPackage) newPackageSecret.Data["data"] = newPackageSecretData - _, err := p.cluster.CreateOrUpdateSecret(newPackageSecret) + _, err := p.cluster.CreateOrUpdateSecret(ctx, newPackageSecret) // We warn and ignore errors because we may have removed the cluster that this package was inside of if err != nil { @@ -122,7 +125,7 @@ func (p *Packager) updatePackageSecret(deployedPackage types.DeployedPackage) { } } -func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, deployedComponent types.DeployedComponent, spinner *message.Spinner) (*types.DeployedPackage, error) { +func (p *Packager) removeComponent(ctx context.Context, deployedPackage *types.DeployedPackage, deployedComponent types.DeployedComponent, spinner *message.Spinner) (*types.DeployedPackage, error) { components := deployedPackage.Data.Components c := helpers.Find(components, func(t types.ZarfComponent) bool { @@ -162,7 +165,7 @@ func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, deplo deployedComponent.InstalledCharts = helpers.RemoveMatches(deployedComponent.InstalledCharts, func(t types.InstalledChart) bool { return t.ChartName == chart.ChartName }) - p.updatePackageSecret(*deployedPackage) + p.updatePackageSecret(ctx, *deployedPackage) } if err := actions.Run(onRemove.Defaults, onRemove.After, nil); err != nil { @@ -184,19 +187,19 @@ func (p *Packager) removeComponent(deployedPackage *types.DeployedPackage, deplo secretName := config.ZarfPackagePrefix + deployedPackage.Name // All the installed components were deleted, therefore this package is no longer actually deployed - packageSecret, err := p.cluster.GetSecret(cluster.ZarfNamespaceName, secretName) + packageSecret, err := p.cluster.GetSecret(ctx, cluster.ZarfNamespaceName, secretName) // We warn and ignore 
errors because we may have removed the cluster that this package was inside of if err != nil { message.Warnf("Unable to delete the '%s' package secret: '%s' (this may be normal if the cluster was removed)", secretName, err.Error()) } else { - err = p.cluster.DeleteSecret(packageSecret) + err = p.cluster.DeleteSecret(ctx, packageSecret) if err != nil { message.Warnf("Unable to delete the '%s' package secret: '%s' (this may be normal if the cluster was removed)", secretName, err.Error()) } } } else { - p.updatePackageSecret(*deployedPackage) + p.updatePackageSecret(ctx, *deployedPackage) } return deployedPackage, nil diff --git a/src/pkg/packager/sources/cluster.go b/src/pkg/packager/sources/cluster.go index 9fb74d6e81..c1478cb3da 100644 --- a/src/pkg/packager/sources/cluster.go +++ b/src/pkg/packager/sources/cluster.go @@ -5,6 +5,7 @@ package sources import ( + "context" "fmt" "github.com/defenseunicorns/pkg/helpers" @@ -25,7 +26,11 @@ func NewClusterSource(pkgOpts *types.ZarfPackageOptions) (PackageSource, error) if !types.IsLowercaseNumberHyphenNoStartHyphen(pkgOpts.PackageSource) { return nil, fmt.Errorf("invalid package name %q", pkgOpts.PackageSource) } - cluster, err := cluster.NewClusterWithWait(cluster.DefaultTimeout) + + ctx, cancel := context.WithTimeout(context.Background(), cluster.DefaultTimeout) + defer cancel() + + cluster, err := cluster.NewClusterWithWait(ctx) if err != nil { return nil, err } @@ -54,7 +59,9 @@ func (s *ClusterSource) Collect(_ string) (string, error) { // LoadPackageMetadata loads package metadata from a cluster. func (s *ClusterSource) LoadPackageMetadata(dst *layout.PackagePaths, _ bool, _ bool) (types.ZarfPackage, []string, error) { - dpkg, err := s.GetDeployedPackage(s.PackageSource) + ctx := context.Background() + + dpkg, err := s.GetDeployedPackage(ctx, s.PackageSource) if err != nil { return types.ZarfPackage{}, nil, err } diff --git a/src/test/e2e/21_connect_creds_test.go b/src/test/e2e/21_connect_creds_test.go index 4fe560f6a8..a66d390f18 100644 --- a/src/test/e2e/21_connect_creds_test.go +++ b/src/test/e2e/21_connect_creds_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "crypto/tls" "fmt" "io" @@ -27,7 +28,9 @@ func TestConnectAndCreds(t *testing.T) { prevAgentSecretData, _, err := e2e.Kubectl("get", "secret", "agent-hook-tls", "-n", "zarf", "-o", "jsonpath={.data}") require.NoError(t, err) - connectToZarfServices(t) + ctx := context.Background() + + connectToZarfServices(ctx, t) stdOut, stdErr, err := e2e.Zarf("tools", "update-creds", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -36,7 +39,7 @@ func TestConnectAndCreds(t *testing.T) { require.NoError(t, err) require.NotEqual(t, prevAgentSecretData, newAgentSecretData, "agent secrets should not be the same") - connectToZarfServices(t) + connectToZarfServices(ctx, t) stdOut, stdErr, err = e2e.Zarf("package", "remove", "init", "--components=logging", "--confirm") require.NoError(t, err, stdOut, stdErr) @@ -68,7 +71,7 @@ func TestMetrics(t *testing.T) { tunnel, err := c.NewTunnel("zarf", "svc", "agent-hook", "", 8888, 8443) require.NoError(t, err) - _, err = tunnel.Connect() + _, err = tunnel.Connect(context.Background()) require.NoError(t, err) defer tunnel.Close() @@ -98,7 +101,7 @@ func TestMetrics(t *testing.T) { require.Equal(t, 200, resp.StatusCode) } -func connectToZarfServices(t *testing.T) { +func connectToZarfServices(ctx context.Context, t *testing.T) { // Make the Registry contains the images we expect stdOut, stdErr, err := e2e.Zarf("tools", "registry", "catalog") 
require.NoError(t, err, stdOut, stdErr) @@ -129,7 +132,7 @@ func connectToZarfServices(t *testing.T) { // Connect to Gitea c, err := cluster.NewCluster() require.NoError(t, err) - tunnelGit, err := c.Connect(cluster.ZarfGit) + tunnelGit, err := c.Connect(ctx, cluster.ZarfGit) require.NoError(t, err) defer tunnelGit.Close() @@ -150,7 +153,7 @@ func connectToZarfServices(t *testing.T) { // Connect to the Logging Stack c, err = cluster.NewCluster() require.NoError(t, err) - tunnelLog, err := c.Connect(cluster.ZarfLogging) + tunnelLog, err := c.Connect(ctx, cluster.ZarfLogging) require.NoError(t, err) defer tunnelLog.Close() diff --git a/src/test/e2e/22_git_and_gitops_test.go b/src/test/e2e/22_git_and_gitops_test.go index f83bd6ba05..7dc712e71a 100644 --- a/src/test/e2e/22_git_and_gitops_test.go +++ b/src/test/e2e/22_git_and_gitops_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -12,6 +13,7 @@ import ( "path/filepath" "testing" + "github.com/defenseunicorns/zarf/src/cmd/common" "github.com/defenseunicorns/zarf/src/internal/packager/git" "github.com/defenseunicorns/zarf/src/pkg/cluster" "github.com/defenseunicorns/zarf/src/types" @@ -35,13 +37,14 @@ func TestGit(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnelGit, err := c.Connect(cluster.ZarfGit) + ctx := context.Background() + tunnelGit, err := c.Connect(ctx, cluster.ZarfGit) require.NoError(t, err) defer tunnelGit.Close() testGitServerConnect(t, tunnelGit.HTTPEndpoint()) - testGitServerReadOnly(t, tunnelGit.HTTPEndpoint()) - testGitServerTagAndHash(t, tunnelGit.HTTPEndpoint()) + testGitServerReadOnly(ctx, t, tunnelGit.HTTPEndpoint()) + testGitServerTagAndHash(ctx, t, tunnelGit.HTTPEndpoint()) } func TestGitOpsFlux(t *testing.T) { @@ -65,9 +68,9 @@ func testGitServerConnect(t *testing.T, gitURL string) { require.Equal(t, 200, resp.StatusCode) } -func testGitServerReadOnly(t *testing.T, gitURL string) { +func testGitServerReadOnly(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie().LoadZarfState() + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err) gitCfg := git.New(state.GitServer) @@ -88,9 +91,9 @@ func testGitServerReadOnly(t *testing.T, gitURL string) { require.True(t, permissionsMap["pull"].(bool)) } -func testGitServerTagAndHash(t *testing.T, gitURL string) { +func testGitServerTagAndHash(ctx context.Context, t *testing.T, gitURL string) { // Init the state variable - state, err := cluster.NewClusterOrDie().LoadZarfState() + state, err := common.NewClusterOrDie(ctx).LoadZarfState(ctx) require.NoError(t, err, "Failed to load Zarf state") repoName := "zarf-public-test-2469062884" diff --git a/src/test/e2e/23_data_injection_test.go b/src/test/e2e/23_data_injection_test.go index ee65c49c0a..efbce9bc13 100644 --- a/src/test/e2e/23_data_injection_test.go +++ b/src/test/e2e/23_data_injection_test.go @@ -21,6 +21,8 @@ func TestDataInjection(t *testing.T) { t.Log("E2E: Data injection") e2e.SetupWithCluster(t) + ctx := context.Background() + path := fmt.Sprintf("build/zarf-package-kiwix-%s-3.5.0.tar", e2e.Arch) tmpdir := t.TempDir() @@ -28,7 +30,7 @@ func TestDataInjection(t *testing.T) { // Repeat the injection action 3 times to ensure the data injection is idempotent and doesn't fail to perform an upgrade for i := 0; i < 3; i++ { - runDataInjection(t, path) + runDataInjection(ctx, t, path) } // Verify the file and injection marker were created @@ -42,7 +44,7 @@ 
func TestDataInjection(t *testing.T) {
 	// need target to equal svc that we are trying to connect to call checkForZarfConnectLabel
 	c, err := cluster.NewCluster()
 	require.NoError(t, err)
-	tunnel, err := c.Connect("kiwix")
+	tunnel, err := c.Connect(ctx, "kiwix")
 	require.NoError(t, err)
 	defer tunnel.Close()
 
@@ -64,9 +66,9 @@ func TestDataInjection(t *testing.T) {
 	require.FileExists(t, filepath.Join(sbomPath, "kiwix", "zarf-component-kiwix-serve.json"), "The data-injection component should have an SBOM json")
 }
 
-func runDataInjection(t *testing.T, path string) {
+func runDataInjection(ctx context.Context, t *testing.T, path string) {
 	// Limit this deploy to 5 minutes
-	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
 	defer cancel()
 
 	// Deploy the data injection example
diff --git a/src/test/e2e/26_simple_packages_test.go b/src/test/e2e/26_simple_packages_test.go
index 996982d383..c7a96e80d6 100644
--- a/src/test/e2e/26_simple_packages_test.go
+++ b/src/test/e2e/26_simple_packages_test.go
@@ -5,6 +5,7 @@ package test
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"path/filepath"
@@ -26,7 +27,7 @@ func TestDosGames(t *testing.T) {
 
 	c, err := cluster.NewCluster()
 	require.NoError(t, err)
-	tunnel, err := c.Connect("doom")
+	tunnel, err := c.Connect(context.Background(), "doom")
 	require.NoError(t, err)
 	defer tunnel.Close()
 
diff --git a/src/test/e2e/30_config_file_test.go b/src/test/e2e/30_config_file_test.go
index a03844fa5a..5095efa91f 100644
--- a/src/test/e2e/30_config_file_test.go
+++ b/src/test/e2e/30_config_file_test.go
@@ -26,9 +26,9 @@ func TestConfigFile(t *testing.T) {
 	e2e.CleanFiles(path)
 
 	// Test the config file environment variable
-	os.Setenv("ZARF_CONFIG", filepath.Join(dir, config))
+	t.Setenv("ZARF_CONFIG", filepath.Join(dir, config))
 	configFileTests(t, dir, path)
 	os.Unsetenv("ZARF_CONFIG")
 
 	configFileDefaultTests(t)
 
@@ -39,6 +39,8 @@ func TestConfigFile(t *testing.T) {
 }
 
 func configFileTests(t *testing.T, dir, path string) {
+	t.Helper()
+
 	_, stdErr, err := e2e.Zarf("package", "create", dir, "--confirm")
 	require.NoError(t, err)
 	require.Contains(t, string(stdErr), "This is a zebra and they have stripes")
@@ -94,6 +96,7 @@ H4RxbE+FpmsMAUCpdrzvFkc=
 }
 
 func configFileDefaultTests(t *testing.T) {
+	t.Helper()
 
 	globalFlags := []string{
 		"architecture: 509a38f0",
@@ -136,7 +139,8 @@ func configFileDefaultTests(t *testing.T) {
 	}
 
 	// Test remaining default initializers
-	os.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml"))
+	t.Setenv("ZARF_CONFIG", filepath.Join("src", "test", "zarf-config-test.toml"))
+	defer os.Unsetenv("ZARF_CONFIG")
 
 	// Test global flags
 	stdOut, _, _ := e2e.Zarf("--help")
@@ -161,6 +165,4 @@ func configFileDefaultTests(t *testing.T) {
 	for _, test := range packageDeployFlags {
 		require.Contains(t, string(stdOut), test)
 	}
-
-	os.Unsetenv("ZARF_CONFIG")
 }
diff --git a/src/test/e2e/33_component_webhooks_test.go b/src/test/e2e/33_component_webhooks_test.go
index 349e0d0770..cbb8321b08 100644
--- a/src/test/e2e/33_component_webhooks_test.go
+++ b/src/test/e2e/33_component_webhooks_test.go
@@ -27,12 +27,12 @@ func TestComponentWebhooks(t *testing.T) {
 	gamesPath := fmt.Sprintf("build/zarf-package-dos-games-%s-1.0.0.tar.zst", e2e.Arch)
 	stdOut, stdErr, err = e2e.Zarf("package", "deploy", gamesPath, "--confirm")
 	require.NoError(t, err, stdOut, stdErr)
-	require.Contains(t, stdErr, "Waiting for webhook 'test-webhook' to complete for component 'baseline'")
+	
require.Contains(t, stdErr, "Waiting for webhook \"test-webhook\" to complete for component \"baseline\"") // Ensure package deployments with the '--skip-webhooks' flag do not wait on webhooks to complete. stdOut, stdErr, err = e2e.Zarf("package", "deploy", gamesPath, "--skip-webhooks", "--confirm") require.NoError(t, err, stdOut, stdErr) - require.NotContains(t, stdErr, "Waiting for webhook 'test-webhook' to complete for component 'baseline'") + require.NotContains(t, stdErr, "Waiting for webhook \"test-webhook\" to complete for component \"baseline\"") // Remove the Pepr webhook package. stdOut, stdErr, err = e2e.Zarf("package", "remove", "component-webhooks", "--confirm") diff --git a/src/test/e2e/99_yolo_test.go b/src/test/e2e/99_yolo_test.go index a4044c53a9..bce9ab1450 100644 --- a/src/test/e2e/99_yolo_test.go +++ b/src/test/e2e/99_yolo_test.go @@ -5,6 +5,7 @@ package test import ( + "context" "fmt" "net/http" "testing" @@ -35,7 +36,7 @@ func TestYOLOMode(t *testing.T) { c, err := cluster.NewCluster() require.NoError(t, err) - tunnel, err := c.Connect("doom") + tunnel, err := c.Connect(context.Background(), "doom") require.NoError(t, err) defer tunnel.Close() diff --git a/src/test/external/ext_in_cluster_test.go b/src/test/external/ext_in_cluster_test.go index a0a4aae7c4..3338b5474f 100644 --- a/src/test/external/ext_in_cluster_test.go +++ b/src/test/external/ext_in_cluster_test.go @@ -81,10 +81,12 @@ func (suite *ExtInClusterTestSuite) Test_0_Mirror() { c, err := cluster.NewCluster() suite.NoError(err) + ctx := context.TODO() + // Check that the registry contains the images we want tunnelReg, err := c.NewTunnel("external-registry", "svc", "external-registry-docker-registry", "", 0, 5000) suite.NoError(err) - _, err = tunnelReg.Connect() + _, err = tunnelReg.Connect(ctx) suite.NoError(err) defer tunnelReg.Close() @@ -101,7 +103,7 @@ func (suite *ExtInClusterTestSuite) Test_0_Mirror() { tunnelGit, err := c.NewTunnel("git-server", "svc", "gitea-http", "", 0, 3000) suite.NoError(err) - _, err = tunnelGit.Connect() + _, err = tunnelGit.Connect(ctx) suite.NoError(err) defer tunnelGit.Close() diff --git a/src/test/external/ext_out_cluster_test.go b/src/test/external/ext_out_cluster_test.go index 7d086bc137..2fd4178cf3 100644 --- a/src/test/external/ext_out_cluster_test.go +++ b/src/test/external/ext_out_cluster_test.go @@ -174,7 +174,8 @@ func (suite *ExtOutClusterTestSuite) Test_2_AuthToPrivateHelmChart() { URL: chartURL, } repoFile.Add(entry) - utils.WriteYaml(repoPath, repoFile, helpers.ReadWriteUser) + err = utils.WriteYaml(repoPath, repoFile, helpers.ReadWriteUser) + suite.NoError(err) err = exec.CmdWithPrint(zarfBinPath, findImageArgs...) suite.NoError(err, "Unable to find images, helm auth likely failed") @@ -192,7 +193,8 @@ func (suite *ExtOutClusterTestSuite) createHelmChartInGitea(baseURL string, user podinfoTarballPath := filepath.Join(tempDir, fmt.Sprintf("podinfo-%s.tgz", podInfoVersion)) suite.NoError(err, "Unable to package chart") - utils.DownloadToFile(fmt.Sprintf("https://stefanprodan.github.io/podinfo/podinfo-%s.tgz", podInfoVersion), podinfoTarballPath, "") + err = utils.DownloadToFile(fmt.Sprintf("https://stefanprodan.github.io/podinfo/podinfo-%s.tgz", podInfoVersion), podinfoTarballPath, "") + suite.NoError(err) url := fmt.Sprintf("%s/api/packages/%s/helm/api/charts", baseURL, username) file, err := os.Open(podinfoTarballPath)
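On the ZARF_CONFIG handling in 30_config_file_test.go: t.Setenv registers an automatic restore when the test finishes, so no deferred os.Unsetenv is needed for end-of-test cleanup; an explicit os.Unsetenv is only required when the variable must be cleared partway through a test before the next step runs, as in TestConfigFile. A minimal sketch (the step helpers are hypothetical):

    package example

    import (
        "os"
        "testing"
    )

    func TestEnvScoping(t *testing.T) {
        t.Setenv("ZARF_CONFIG", "custom.toml") // restored automatically when the test ends

        stepThatReadsConfig(t)

        // Clear mid-test so the following step sees defaults instead of custom.toml.
        os.Unsetenv("ZARF_CONFIG")
        stepThatExpectsDefaults(t)
    }

    func stepThatReadsConfig(t *testing.T)     { t.Helper() }
    func stepThatExpectsDefaults(t *testing.T) { t.Helper() }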