From df5436b38025cb0b0e06a4cceffa3c863307428d Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Tue, 18 Oct 2022 19:38:28 +0800 Subject: [PATCH 1/3] upgrade velero docker image Signed-off-by: Lyndon-Li --- golangci.yaml | 12 ++++++------ hack/build-image/Dockerfile | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/golangci.yaml b/golangci.yaml index 0b6da27f7c..8c6d87b63e 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -25,8 +25,8 @@ run: # from this option's value (see skip-dirs-use-default). # "/" will be replaced by current OS file path separator to properly work # on Windows. - #skip-dirs: - # - src/external_libs + skip-dirs: + - test/e2e/* # - autogenerated_by_my_lib # default is true. Enables skipping of directories: @@ -39,8 +39,8 @@ run: # autogenerated files. If it's not please let us know. # "/" will be replaced by current OS file path separator to properly work # on Windows. - # skip-files: - # - ".*\\.my\\.go$" + skip-files: + - ".*_test.go$" # - lib/bad.go # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": @@ -320,7 +320,7 @@ linters: fast: false -#issues: +issues: # # List of regexps of issue texts to exclude, empty list by default. # # But independently from this option we use default exclude patterns, # # it can be disabled by `exclude-use-default: false`. To list all @@ -359,7 +359,7 @@ linters: # it can be disabled by this option. To list all # excluded by default patterns execute `golangci-lint run --help`. # Default value for this option is true. - exclude-use-default: false + exclude-use-default: true # The default value is false. If set to true exclude and exclude-rules # regular expressions become case sensitive. diff --git a/hack/build-image/Dockerfile b/hack/build-image/Dockerfile index 5baa4365e2..b3c5e42a04 100644 --- a/hack/build-image/Dockerfile +++ b/hack/build-image/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM golang:1.17 +FROM golang:1.18 ARG GOPROXY @@ -36,11 +36,11 @@ RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/downloa chmod +x /usr/local/kubebuilder/bin/kubebuilder # get controller-tools -RUN go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 +RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 # get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit # is not important) -RUN go get golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d +RUN go install golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d # get protoc compiler and golang plugin WORKDIR /root @@ -49,7 +49,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v unzip protoc-3.14.0-linux-x86_64.zip && \ mv bin/protoc /usr/bin/protoc && \ chmod +x /usr/bin/protoc -RUN go get github.com/golang/protobuf/protoc-gen-go@v1.4.3 +RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3 # get goreleaser RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \ @@ -58,7 +58,7 @@ RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.1 chmod +x /usr/bin/goreleaser # get golangci-lint -RUN curl -sSfL https://github.com/raw/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0 +RUN curl -sSfL https://github.com/raw/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.45.0 # install kubectl RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl From d7b4583b2b22b4df0c1c0d3c54b412ca4ae77d0e Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Tue, 18 Oct 2022 21:08:55 +0800 Subject: [PATCH 2/3] fix lint errors Signed-off-by: Lyndon-Li --- pkg/archive/extractor.go | 2 +- pkg/backup/item_collector.go | 4 +- pkg/cmd/cli/backup/create.go | 10 +++-- pkg/cmd/cli/backup/describe.go | 2 +- pkg/cmd/cli/backuplocation/create.go | 2 +- pkg/cmd/cli/backuplocation/delete.go | 6 +-- pkg/cmd/cli/backuplocation/set.go | 2 +- pkg/cmd/cli/nodeagent/server.go | 4 +- pkg/cmd/cli/restore/create.go | 12 ++++-- pkg/cmd/cli/restore/describe.go | 2 +- pkg/cmd/cli/schedule/describe.go | 2 +- pkg/cmd/server/server.go | 4 +- .../util/downloadrequest/downloadrequest.go | 2 +- pkg/cmd/util/output/backup_describer.go | 14 +++---- pkg/cmd/util/output/output.go | 5 ++- pkg/cmd/util/output/restore_describer.go | 8 ++-- pkg/controller/backup_controller.go | 7 ++-- pkg/controller/backup_deletion_controller.go | 8 ++-- pkg/controller/backup_sync_controller.go | 8 ++-- .../pod_volume_restore_controller.go | 2 +- pkg/controller/restore_controller.go | 8 ++-- pkg/nodeagent/node_agent.go | 2 +- .../clientmgmt/process/client_builder.go | 2 +- pkg/repository/config/aws.go | 1 + pkg/repository/config/azure.go | 38 +++++++++---------- pkg/repository/config/gcp.go | 1 + pkg/repository/keys/keys.go | 1 + pkg/repository/provider/unified_repo.go | 6 +-- pkg/restic/command.go | 2 +- pkg/restore/admissionwebhook_config_action.go | 4 +- pkg/restore/restore.go | 2 +- pkg/util/kube/utils.go | 2 +- 32 files changed, 94 insertions(+), 81 deletions(-) diff --git a/pkg/archive/extractor.go b/pkg/archive/extractor.go index 7a0dfce74b..1bd16c2586 100644 --- a/pkg/archive/extractor.go +++ b/pkg/archive/extractor.go @@ -84,7 +84,7 @@ func (e 
*Extractor) readBackup(tarRdr *tar.Reader) (string, error) { return "", err } - target := filepath.Join(dir, header.Name) + target := filepath.Join(dir, header.Name) //nolint:gosec switch header.Typeflag { case tar.TypeDir: diff --git a/pkg/backup/item_collector.go b/pkg/backup/item_collector.go index 474fbabee3..88be4e0789 100644 --- a/pkg/backup/item_collector.go +++ b/pkg/backup/item_collector.go @@ -151,7 +151,7 @@ func sortResourcesByOrder(log logrus.FieldLogger, items []*kubernetesResource, o } // getOrderedResourcesForType gets order of resourceType from orderResources. -func getOrderedResourcesForType(log logrus.FieldLogger, orderedResources map[string]string, resourceType string) []string { +func getOrderedResourcesForType(orderedResources map[string]string, resourceType string) []string { if orderedResources == nil { return nil } @@ -175,7 +175,7 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group clusterScoped = !resource.Namespaced ) - orders := getOrderedResourcesForType(log, r.backupRequest.Backup.Spec.OrderedResources, resource.Name) + orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name) // Getting the preferred group version of this resource preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion("")) if err != nil { diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go index 52aeab3d52..27c0be3729 100644 --- a/pkg/cmd/cli/backup/create.go +++ b/pkg/cmd/cli/backup/create.go @@ -112,6 +112,10 @@ func NewCreateOptions() *CreateOptions { } } +const ( + strTrue = "true" +) + func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.DurationVar(&o.TTL, "ttl", o.TTL, "How long before the backup can be garbage collected.") flags.Var(&o.IncludeNamespaces, "include-namespaces", "Namespaces to include in the backup (use '*' for all namespaces).") @@ -127,13 +131,13 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup. 
If the parameter is not set, it is treated as setting to 'true'.") // this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true" // like a normal bool flag - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the backup") - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue f = flags.VarPF(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", "", "Use pod volume file system backup by default for volumes") - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue } // BindWait binds the wait flag separately so it is not called by other create diff --git a/pkg/cmd/cli/backup/describe.go b/pkg/cmd/cli/backup/describe.go index 82ca472980..3e5ebbded3 100644 --- a/pkg/cmd/cli/backup/describe.go +++ b/pkg/cmd/cli/backup/describe.go @@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { } } - s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) + s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/backuplocation/create.go b/pkg/cmd/cli/backuplocation/create.go index a90564be3a..ec19b6e56d 100644 --- a/pkg/cmd/cli/backuplocation/create.go +++ b/pkg/cmd/cli/backuplocation/create.go @@ -212,7 +212,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { for _, location := range locations.Items { if location.Spec.Default { location.Spec.Default = false - if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { + if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint return errors.WithStack(err) } break diff --git a/pkg/cmd/cli/backuplocation/delete.go b/pkg/cmd/cli/backuplocation/delete.go index c04c74f42e..d67ed54369 100644 --- a/pkg/cmd/cli/backuplocation/delete.go +++ b/pkg/cmd/cli/backuplocation/delete.go @@ -116,7 +116,7 @@ func Run(f client.Factory, o *cli.DeleteOptions) error { // Create a backup-location deletion request for each for _, location := range locations.Items { - if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { + if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { //nolint errs = append(errs, errors.WithStack(err)) continue } @@ -163,7 +163,7 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error { var errs []error for _, backup := range backups.Items { - if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { + if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { //nolint errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err))) continue } @@ -175,7 +175,7 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error { var errs []error for _, repo := range repos.Items { - 
if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { + if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { //nolint errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err))) continue } diff --git a/pkg/cmd/cli/backuplocation/set.go b/pkg/cmd/cli/backuplocation/set.go index dda731ac6d..44730d659d 100644 --- a/pkg/cmd/cli/backuplocation/set.go +++ b/pkg/cmd/cli/backuplocation/set.go @@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error { break } location.Spec.Default = false - if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { + if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint return errors.WithStack(err) } break diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 373aa1161f..7fb3dfc827 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase) pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { + if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { //nolint s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName()) continue } @@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase) pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { + if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { //nolint s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName()) continue } diff --git a/pkg/cmd/cli/restore/create.go b/pkg/cmd/cli/restore/create.go index 7625c1f762..2ac45e8ee8 100644 --- a/pkg/cmd/cli/restore/create.go +++ b/pkg/cmd/cli/restore/create.go @@ -107,6 +107,10 @@ func NewCreateOptions() *CreateOptions { } } +const ( + strTrue = "true" +) + func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.BackupName, "from-backup", "", "Backup to restore from") flags.StringVar(&o.ScheduleName, "from-schedule", "", "Schedule to restore from") @@ -123,18 +127,18 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.") // this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true" // like a normal bool flag - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue f = flags.VarPF(&o.PreserveNodePorts, "preserve-nodeports", "", "Whether to preserve nodeports of Services when restoring.") // this allows the user to just specify "--preserve-nodeports" as shorthand for 
"--preserve-nodeports=true" // like a normal bool flag - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the restore.") - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue f = flags.VarPF(&o.AllowPartiallyFailed, "allow-partially-failed", "", "If using --from-schedule, whether to consider PartiallyFailed backups when looking for the most recent one. This flag has no effect if not using --from-schedule.") - f.NoOptDefVal = "true" + f.NoOptDefVal = strTrue flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "Wait for the operation to complete.") } diff --git a/pkg/cmd/cli/restore/describe.go b/pkg/cmd/cli/restore/describe.go index 76ef3a43b0..e8cbb8d885 100644 --- a/pkg/cmd/cli/restore/describe.go +++ b/pkg/cmd/cli/restore/describe.go @@ -75,7 +75,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err) } - s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) + s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/schedule/describe.go b/pkg/cmd/cli/schedule/describe.go index ba6972a96c..5c64610436 100644 --- a/pkg/cmd/cli/schedule/describe.go +++ b/pkg/cmd/cli/schedule/describe.go @@ -54,7 +54,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { first := true for _, schedule := range schedules.Items { - s := output.DescribeSchedule(&schedule) + s := output.DescribeSchedule(&schedule) //nolint if first { first = false fmt.Print(s) diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 238d155708..e66a6275f1 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -948,7 +948,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, updated.Status.Phase = velerov1api.BackupPhaseFailed updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { + if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { //nolint log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName()) continue } @@ -971,7 +971,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, updated.Status.Phase = velerov1api.RestorePhaseFailed updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { + if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { //nolint log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName()) continue } diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index f131b32e0c..902dcd7982 100644 --- 
a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -111,7 +111,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin httpClient := new(http.Client) httpClient.Transport = &http.Transport{ TLSClientConfig: &tls.Config{ - InsecureSkipVerify: insecureSkipTLSVerify, + InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec RootCAs: caPool, }, IdleConnTimeout: timeout, diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index a6a2f6f996..b17267444e 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -125,7 +125,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { } d.Printf("\tIncluded:\t%s\n", s) if len(spec.ExcludedNamespaces) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(spec.ExcludedNamespaces, ", ") } @@ -140,7 +140,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { } d.Printf("\tIncluded:\t%s\n", s) if len(spec.ExcludedResources) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(spec.ExcludedResources, ", ") } @@ -149,7 +149,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { d.Printf("\tCluster-scoped:\t%s\n", BoolPointerString(spec.IncludeClusterResources, "excluded", "included", "auto")) d.Println() - s = "" + s = emptyDisplay if spec.LabelSelector != nil { s = metav1.FormatLabelSelector(spec.LabelSelector) } @@ -169,7 +169,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { d.Println() if len(spec.Hooks.Resources) == 0 { - d.Printf("Hooks:\t\n") + d.Printf("Hooks:\t" + emptyDisplay + "\n") } else { d.Printf("Hooks:\n") d.Printf("\tResources:\n") @@ -184,7 +184,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { } d.Printf("\t\t\t\tIncluded:\t%s\n", s) if len(spec.ExcludedNamespaces) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(spec.ExcludedNamespaces, ", ") } @@ -199,14 +199,14 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { } d.Printf("\t\t\t\tIncluded:\t%s\n", s) if len(spec.ExcludedResources) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(spec.ExcludedResources, ", ") } d.Printf("\t\t\t\tExcluded:\t%s\n", s) d.Println() - s = "" + s = emptyDisplay if backupResourceHookSpec.LabelSelector != nil { s = metav1.FormatLabelSelector(backupResourceHookSpec.LabelSelector) } diff --git a/pkg/cmd/util/output/output.go b/pkg/cmd/util/output/output.go index bfba88f15e..1a520b0aea 100644 --- a/pkg/cmd/util/output/output.go +++ b/pkg/cmd/util/output/output.go @@ -34,7 +34,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/encode" ) -const downloadRequestTimeout = 30 * time.Second +const ( + downloadRequestTimeout = 30 * time.Second + emptyDisplay = "" +) // BindFlags defines a set of output-specific flags within the provided // FlagSet. 
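Aside (not part of the patch): most of the //nolint markers added in this patch suppress gosec's G601 warning, "implicit memory aliasing in for loop", which golangci-lint v1.45.0 reports and v1.27.0 did not. In Go versions before 1.22, a range loop reuses a single loop variable, so taking its address inside the loop makes every stored pointer alias that one variable. A minimal runnable sketch of the bug and of the index-based rewrite that PATCH 3/3 applies throughout; the Item type is a hypothetical stand-in:

package main

import "fmt"

type Item struct{ Name string }

func main() {
	items := []Item{{"a"}, {"b"}, {"c"}}

	// Buggy: &it points at the reused loop variable, so all three stored
	// pointers end up referring to the last element (prints "c c c").
	var bad []*Item
	for _, it := range items {
		bad = append(bad, &it) // gosec G601 flags this
	}
	fmt.Println(bad[0].Name, bad[1].Name, bad[2].Name)

	// Fixed as in PATCH 3/3: index into the backing slice instead of
	// taking the address of the loop variable (prints "a b c").
	var good []*Item
	for i := range items {
		good = append(good, &items[i])
	}
	fmt.Println(good[0].Name, good[1].Name, good[2].Name)
}
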
diff --git a/pkg/cmd/util/output/restore_describer.go b/pkg/cmd/util/output/restore_describer.go index f6b5f2b378..631dbb2cb0 100644 --- a/pkg/cmd/util/output/restore_describer.go +++ b/pkg/cmd/util/output/restore_describer.go @@ -107,7 +107,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel } d.Printf("\tIncluded:\t%s\n", s) if len(restore.Spec.ExcludedNamespaces) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(restore.Spec.ExcludedNamespaces, ", ") } @@ -122,7 +122,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel } d.Printf("\tIncluded:\t%s\n", s) if len(restore.Spec.ExcludedResources) == 0 { - s = "" + s = emptyDisplay } else { s = strings.Join(restore.Spec.ExcludedResources, ", ") } @@ -134,7 +134,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel d.DescribeMap("Namespace mappings", restore.Spec.NamespaceMapping) d.Println() - s = "" + s = emptyDisplay if restore.Spec.LabelSelector != nil { s = metav1.FormatLabelSelector(restore.Spec.LabelSelector) } @@ -149,7 +149,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel } d.Println() - s = "" + s = emptyDisplay if restore.Spec.ExistingResourcePolicy != "" { s = string(restore.Spec.ExistingResourcePolicy) } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 39444459e4..bf5ddc9325 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -699,7 +699,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { } // Delete the VolumeSnapshots created in the backup, when CSI feature is enabled. - c.deleteVolumeSnapshot(volumeSnapshots, volumeSnapshotContents, *backup, backupLog) + c.deleteVolumeSnapshot(volumeSnapshots, volumeSnapshotContents, backupLog) } @@ -751,7 +751,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { return err } - if errs := persistBackup(backup, backupFile, logFile, backupStore, c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)), volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 { + if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 { fatalErrs = append(fatalErrs, errs...) } @@ -795,7 +795,6 @@ func recordBackupMetrics(log logrus.FieldLogger, backup *velerov1api.Backup, bac func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File, backupStore persistence.BackupStore, - log logrus.FieldLogger, csiVolumeSnapshots []snapshotv1api.VolumeSnapshot, csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, @@ -945,7 +944,7 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo // change DeletionPolicy to Retain before deleting VS, then change DeletionPolicy back to Delete. 
func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []snapshotv1api.VolumeSnapshot, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, - backup pkgbackup.Request, logger logrus.FieldLogger) { + logger logrus.FieldLogger) { var wg sync.WaitGroup vscMap := make(map[string]snapshotv1api.VolumeSnapshotContent) for _, vsc := range volumeSnapshotContents { diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 371179e56c..7b188d98b3 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -329,7 +329,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque if restore.Spec.BackupName != backup.Name { continue } - restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) + restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) //nolint restoreLog.Info("Deleting restore log/results from backup storage") if err := backupStore.DeleteRestore(restore.Name); err != nil { @@ -339,8 +339,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque } restoreLog.Info("Deleting restore referencing backup") - if err := r.Delete(ctx, &restore); err != nil { - errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) + if err := r.Delete(ctx, &restore); err != nil { //nolint + errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) //nolint } } } @@ -427,7 +427,7 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co if dbr.Name == req.Name { continue } - if err := r.Delete(ctx, &dbr); err != nil { + if err := r.Delete(ctx, &dbr); err != nil { //nolint errs = append(errs, errors.WithStack(err)) } else { log.Infof("deletion request '%s' removed.", dbr.Name) diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 8352e3ccd6..30ebe3b094 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -323,7 +323,7 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati continue } - if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { + if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { //nolint log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster") } else { log.Debug("Deleted orphaned backup from cluster") @@ -347,7 +347,7 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b for _, vs := range vsList.Items { name := kube.NamespaceAndName(vs.GetObjectMeta()) log.Debugf("Deleting volumesnapshot %s", name) - if err := b.client.Delete(context.TODO(), &vs); err != nil { + if err := b.client.Delete(context.TODO(), &vs); err != nil { //nolint log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name) } } @@ -379,11 +379,11 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList { bslArray = append(bslArray, &inputBSLList.Items[i]) // append everything before the default for _, bsl := range inputBSLList.Items[:i] { - bslArray = append(bslArray, &bsl) + bslArray = append(bslArray, &bsl) //nolint } // append everything after the default for _, bsl := range inputBSLList.Items[i+1:] { - bslArray = append(bslArray, &bsl) + bslArray = append(bslArray, &bsl) //nolint } meta.SetList(resultBSLList, bslArray) diff --git 
a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index 9c78171ae9..5bdaf80390 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -302,7 +302,7 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve // Write a done file with name= into the just-created .velero dir // within the volume. The velero init container on the pod is waiting // for this file to exist in each restored volume before completing. - if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { + if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec return errors.Wrap(err, "error writing done file") } diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index d14917589b..038e971095 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -463,7 +463,7 @@ func (c *restoreController) fetchBackupInfo(backupName string, pluginManager cli func (c *restoreController) runValidatedRestore(restore *api.Restore, info backupInfo) error { // instantiate the per-restore logger that will output both to a temp file // (for upload to object storage) and to stdout. - restoreLog, err := newRestoreLogger(restore, c.logger, c.restoreLogLevel, c.logFormat) + restoreLog, err := newRestoreLogger(restore, c.restoreLogLevel, c.logFormat) if err != nil { return err } @@ -577,14 +577,14 @@ func (c *restoreController) runValidatedRestore(restore *api.Restore, info backu "errors": restoreErrors, } - if err := putResults(restore, m, info.backupStore, c.logger); err != nil { + if err := putResults(restore, m, info.backupStore); err != nil { c.logger.WithError(err).Error("Error uploading restore results to backup storage") } return nil } -func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore, log logrus.FieldLogger) error { +func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore) error { buf := new(bytes.Buffer) gzw := gzip.NewWriter(buf) defer gzw.Close() @@ -669,7 +669,7 @@ type restoreLogger struct { w *gzip.Writer } -func newRestoreLogger(restore *api.Restore, baseLogger logrus.FieldLogger, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) { +func newRestoreLogger(restore *api.Restore, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) { file, err := ioutil.TempFile("", "") if err != nil { return nil, errors.Wrap(err, "error creating temp file") diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index cf1d32fbb5..091c20b51b 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -62,7 +62,7 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod } for _, pod := range pods.Items { - if kube.IsPodRunning(&pod) != nil { + if kube.IsPodRunning(&pod) != nil { //nolint continue } diff --git a/pkg/plugin/clientmgmt/process/client_builder.go b/pkg/plugin/clientmgmt/process/client_builder.go index 4b6c27731f..aaa56661a3 100644 --- a/pkg/plugin/clientmgmt/process/client_builder.go +++ b/pkg/plugin/clientmgmt/process/client_builder.go @@ -77,7 +77,7 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig { string(common.PluginKindItemSnapshotter): 
framework.NewItemSnapshotterPlugin(common.ClientLogger(b.clientLogger)), }, Logger: b.pluginLogger, - Cmd: exec.Command(b.commandName, b.commandArgs...), + Cmd: exec.Command(b.commandName, b.commandArgs...), //nolint } } diff --git a/pkg/repository/config/aws.go b/pkg/repository/config/aws.go index 0ff4ca218a..a3c63b949c 100644 --- a/pkg/repository/config/aws.go +++ b/pkg/repository/config/aws.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:gosec package config import ( diff --git a/pkg/repository/config/azure.go b/pkg/repository/config/azure.go index 8c5871c527..c90c4f0ac7 100644 --- a/pkg/repository/config/azure.go +++ b/pkg/repository/config/azure.go @@ -50,18 +50,18 @@ func getSubscriptionID(config map[string]string) string { return os.Getenv(subscriptionIDEnvVar) } -func getStorageAccountKey(config map[string]string) (string, *azure.Environment, error) { +func getStorageAccountKey(config map[string]string) (string, error) { credentialsFile := selectCredentialsFile(config) if err := loadCredentialsIntoEnv(credentialsFile); err != nil { - return "", nil, err + return "", err } // Get Azure cloud from AZURE_CLOUD_NAME, if it exists. If the env var does not // exist, parseAzureEnvironment will return azure.PublicCloud. env, err := parseAzureEnvironment(os.Getenv(cloudNameEnvVar)) if err != nil { - return "", nil, errors.Wrap(err, "unable to parse azure cloud name environment variable") + return "", errors.Wrap(err, "unable to parse azure cloud name environment variable") } // Get storage key from secret using key config[storageAccountKeyEnvVarConfigKey]. If the config does not @@ -69,21 +69,21 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment, if secretKeyEnvVar := config[storageAccountKeyEnvVarConfigKey]; secretKeyEnvVar != "" { storageKey := os.Getenv(secretKeyEnvVar) if storageKey == "" { - return "", env, errors.Errorf("no storage key secret with key %s found", secretKeyEnvVar) + return "", errors.Errorf("no storage key secret with key %s found", secretKeyEnvVar) } - return storageKey, env, nil + return storageKey, nil } // get subscription ID from object store config or AZURE_SUBSCRIPTION_ID environment variable subscriptionID := getSubscriptionID(config) if subscriptionID == "" { - return "", nil, errors.New("azure subscription ID not found in object store's config or in environment variable") + return "", errors.New("azure subscription ID not found in object store's config or in environment variable") } // we need config["resourceGroup"], config["storageAccount"] - if _, err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil { - return "", env, errors.Wrap(err, "unable to get all required config values") + if err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil { + return "", errors.Wrap(err, "unable to get all required config values") } // get authorizer from environment in the following order: @@ -93,7 +93,7 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment, // 4. 
MSI (managed service identity) authorizer, err := auth.NewAuthorizerFromEnvironment() if err != nil { - return "", nil, errors.Wrap(err, "error getting authorizer from environment") + return "", errors.Wrap(err, "error getting authorizer from environment") } // get storageAccountsClient @@ -103,10 +103,10 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment, // get storage key res, err := storageAccountsClient.ListKeys(context.TODO(), config[resourceGroupConfigKey], config[storageAccountConfigKey], storagemgmt.Kerb) if err != nil { - return "", env, errors.WithStack(err) + return "", errors.WithStack(err) } if res.Keys == nil || len(*res.Keys) == 0 { - return "", env, errors.New("No storage keys found") + return "", errors.New("No storage keys found") } var storageKey string @@ -120,10 +120,10 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment, } if storageKey == "" { - return "", env, errors.New("No storage key with Full permissions found") + return "", errors.New("No storage key with Full permissions found") } - return storageKey, env, nil + return storageKey, nil } func mapLookup(data map[string]string) func(string) string { @@ -136,12 +136,12 @@ func mapLookup(data map[string]string) func(string) string { // relies on (AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY) based // on info in the provided object storage location config map. func GetAzureResticEnvVars(config map[string]string) (map[string]string, error) { - storageAccountKey, _, err := getStorageAccountKey(config) + storageAccountKey, err := getStorageAccountKey(config) if err != nil { return nil, err } - if _, err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil { + if err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil { return nil, errors.Wrap(err, "unable to get all required config values") } @@ -191,7 +191,7 @@ func parseAzureEnvironment(cloudName string) (*azure.Environment, error) { return &env, errors.WithStack(err) } -func getRequiredValues(getValue func(string) string, keys ...string) (map[string]string, error) { +func getRequiredValues(getValue func(string) string, keys ...string) error { missing := []string{} results := map[string]string{} @@ -204,10 +204,10 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string } if len(missing) > 0 { - return nil, errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", ")) + return errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", ")) } - return results, nil + return nil } // GetAzureStorageDomain gets the Azure storage domain required by a Azure blob connection, @@ -221,7 +221,7 @@ func GetAzureStorageDomain(config map[string]string) string { } func GetAzureCredentials(config map[string]string) (string, string, error) { - storageAccountKey, _, err := getStorageAccountKey(config) + storageAccountKey, err := getStorageAccountKey(config) if err != nil { return "", "", err } diff --git a/pkg/repository/config/gcp.go b/pkg/repository/config/gcp.go index ed9e3ec6a8..88b5d5ff35 100644 --- a/pkg/repository/config/gcp.go +++ b/pkg/repository/config/gcp.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//nolint:gosec package config import "os" diff --git a/pkg/repository/keys/keys.go b/pkg/repository/keys/keys.go index 2ec8bc0ba3..48d3bd75ca 100644 --- a/pkg/repository/keys/keys.go +++ b/pkg/repository/keys/keys.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:gosec package keys import ( diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 6bb4f38445..30d1c97aaa 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -280,12 +280,12 @@ func (urp *unifiedRepoProvider) DefaultMaintenanceFrequency(ctx context.Context, } func (urp *unifiedRepoProvider) GetPassword(param interface{}) (string, error) { - repoParam, ok := param.(RepoParam) + _, ok := param.(RepoParam) if !ok { return "", errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } - repoPassword, err := getRepoPassword(urp.credentialGetter.FromSecret, repoParam) + repoPassword, err := getRepoPassword(urp.credentialGetter.FromSecret) if err != nil { return "", errors.Wrap(err, "error to get repo password") } @@ -330,7 +330,7 @@ func (urp *unifiedRepoProvider) GetStoreOptions(param interface{}) (map[string]s return storeOptions, nil } -func getRepoPassword(secretStore credentials.SecretStore, param RepoParam) (string, error) { +func getRepoPassword(secretStore credentials.SecretStore) (string, error) { if secretStore == nil { return "", errors.New("invalid credentials interface") } diff --git a/pkg/restic/command.go b/pkg/restic/command.go index d06968018f..a484a53161 100644 --- a/pkg/restic/command.go +++ b/pkg/restic/command.go @@ -77,7 +77,7 @@ func (c *Command) String() string { // Cmd returns an exec.Cmd for the command. func (c *Command) Cmd() *exec.Cmd { parts := c.StringSlice() - cmd := exec.Command(parts[0], parts[1:]...) + cmd := exec.Command(parts[0], parts[1:]...) //nolint cmd.Dir = c.Dir if len(c.Env) > 0 { diff --git a/pkg/restore/admissionwebhook_config_action.go b/pkg/restore/admissionwebhook_config_action.go index 8fd5c1693e..7a617b6f82 100644 --- a/pkg/restore/admissionwebhook_config_action.go +++ b/pkg/restore/admissionwebhook_config_action.go @@ -72,13 +72,13 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA newWebhooks := make([]interface{}, 0) for i, entry := range webhooks { logger2 := logger.WithField("index", i) - obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) //nolint if err != nil { logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err) continue } s, _, _ := unstructured.NestedString(obj, "sideEffects") - if s != "None" && s != "NoneOnDryRun" { + if s != "None" && s != "NoneOnDryRun" { //nolint logger2.Infof("reset the invalid sideEffects value '%s' to 'None'", s) obj["sideEffects"] = "None" } diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 2779553cea..ef6578a03a 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -1523,7 +1523,7 @@ func shouldRenamePV(ctx *restoreContext, obj *unstructured.Unstructured, client // remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a // restore's NamespaceMappings, if necessary. Returns true if the namespace was // remapped, false if it was not required. 
-func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) { +func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) { //nolint:unparam if len(ctx.restore.Spec.NamespaceMapping) == 0 { ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces") return false, nil diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index bf7ac0011c..42f0a35419 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -125,7 +125,7 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1 for _, item := range pod.Spec.Volumes { if item.Name == volumeName { - volume = &item + volume = &item //nolint break } } From c92f06ef172a658302ee401e835ac716867a6deb Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Tue, 18 Oct 2022 22:26:22 +0800 Subject: [PATCH 3/3] fix lint loop iterator problem Signed-off-by: Lyndon-Li --- changelogs/unreleased/5459-lyndon | 1 + golangci.yaml | 2 +- pkg/cmd/cli/backup/create.go | 10 +++------- pkg/cmd/cli/backup/describe.go | 4 ++-- pkg/cmd/cli/backuplocation/create.go | 4 ++-- pkg/cmd/cli/backuplocation/delete.go | 12 ++++++------ pkg/cmd/cli/backuplocation/set.go | 4 ++-- pkg/cmd/cli/nodeagent/server.go | 8 ++++---- pkg/cmd/cli/restore/create.go | 12 ++++-------- pkg/cmd/cli/restore/describe.go | 4 ++-- pkg/cmd/cli/schedule/describe.go | 4 ++-- pkg/cmd/server/server.go | 8 ++++---- pkg/controller/backup_deletion_controller.go | 12 ++++++------ pkg/controller/backup_sync_controller.go | 14 ++++++++------ pkg/nodeagent/node_agent.go | 6 +++--- pkg/repository/provider/unified_repo_test.go | 2 +- pkg/restore/admissionwebhook_config_action.go | 6 +++--- pkg/util/kube/utils.go | 6 +++--- 18 files changed, 57 insertions(+), 62 deletions(-) create mode 100644 changelogs/unreleased/5459-lyndon diff --git a/changelogs/unreleased/5459-lyndon b/changelogs/unreleased/5459-lyndon new file mode 100644 index 0000000000..ba04f2082a --- /dev/null +++ b/changelogs/unreleased/5459-lyndon @@ -0,0 +1 @@ +Upgrade velero docker image to use go 1.18 and upgrade golangci-lint to 1.45.0 \ No newline at end of file diff --git a/golangci.yaml b/golangci.yaml index 8c6d87b63e..e958170d02 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -117,7 +117,7 @@ linters-settings: # minimal length of string constant, 3 by default min-len: 3 # minimal occurrences count to trigger, 3 by default - min-occurrences: 3 + min-occurrences: 5 gocritic: # Which checks should be enabled; can't be combined with 'disabled-checks'; # See https://go-critic.github.io/overview#checks-overview diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go index 27c0be3729..52aeab3d52 100644 --- a/pkg/cmd/cli/backup/create.go +++ b/pkg/cmd/cli/backup/create.go @@ -112,10 +112,6 @@ func NewCreateOptions() *CreateOptions { } } -const ( - strTrue = "true" -) - func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.DurationVar(&o.TTL, "ttl", o.TTL, "How long before the backup can be garbage collected.") flags.Var(&o.IncludeNamespaces, "include-namespaces", "Namespaces to include in the backup (use '*' for all namespaces).") @@ -131,13 +127,13 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup. 
If the parameter is not set, it is treated as setting to 'true'.") // this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true" // like a normal bool flag - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the backup") - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" f = flags.VarPF(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", "", "Use pod volume file system backup by default for volumes") - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" } // BindWait binds the wait flag separately so it is not called by other create diff --git a/pkg/cmd/cli/backup/describe.go b/pkg/cmd/cli/backup/describe.go index 3e5ebbded3..1284e29c93 100644 --- a/pkg/cmd/cli/backup/describe.go +++ b/pkg/cmd/cli/backup/describe.go @@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { } first := true - for _, backup := range backups.Items { + for i, backup := range backups.Items { deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID)) deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions) if err != nil { @@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { } } - s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint + s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/backuplocation/create.go b/pkg/cmd/cli/backuplocation/create.go index ec19b6e56d..e08e0c6ae2 100644 --- a/pkg/cmd/cli/backuplocation/create.go +++ b/pkg/cmd/cli/backuplocation/create.go @@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil { return errors.WithStack(err) } - for _, location := range locations.Items { + for i, location := range locations.Items { if location.Spec.Default { location.Spec.Default = false - if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint + if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil { return errors.WithStack(err) } break diff --git a/pkg/cmd/cli/backuplocation/delete.go b/pkg/cmd/cli/backuplocation/delete.go index d67ed54369..becb92afb3 100644 --- a/pkg/cmd/cli/backuplocation/delete.go +++ b/pkg/cmd/cli/backuplocation/delete.go @@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error { } // Create a backup-location deletion request for each - for _, location := range locations.Items { - if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { //nolint + for i, location := range locations.Items { + if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil { errs = append(errs, errors.WithStack(err)) continue } @@ -162,8 +162,8 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele func 
deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error { var errs []error - for _, backup := range backups.Items { - if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { //nolint + for i, backup := range backups.Items { + if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil { errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err))) continue } @@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error { var errs []error - for _, repo := range repos.Items { - if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { //nolint + for i, repo := range repos.Items { + if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil { errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err))) continue } diff --git a/pkg/cmd/cli/backuplocation/set.go b/pkg/cmd/cli/backuplocation/set.go index 44730d659d..bdb36b867c 100644 --- a/pkg/cmd/cli/backuplocation/set.go +++ b/pkg/cmd/cli/backuplocation/set.go @@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error { if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil { return errors.WithStack(err) } - for _, location := range locations.Items { + for i, location := range locations.Items { if !location.Spec.Default { continue } @@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error { break } location.Spec.Default = false - if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint + if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil { return errors.WithStack(err) } break diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 7fb3dfc827..b154f9bf81 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -294,7 +294,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups") return } - for _, pvb := range pvbs.Items { + for i, pvb := range pvbs.Items { if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress { s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase) continue @@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase) pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { //nolint + if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil { s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName()) continue } @@ -321,7 +321,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { 
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores") return } - for _, pvr := range pvrs.Items { + for i, pvr := range pvrs.Items { if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress { s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase) continue @@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase) pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { //nolint + if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil { s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName()) continue } diff --git a/pkg/cmd/cli/restore/create.go b/pkg/cmd/cli/restore/create.go index 2ac45e8ee8..7625c1f762 100644 --- a/pkg/cmd/cli/restore/create.go +++ b/pkg/cmd/cli/restore/create.go @@ -107,10 +107,6 @@ func NewCreateOptions() *CreateOptions { } } -const ( - strTrue = "true" -) - func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.BackupName, "from-backup", "", "Backup to restore from") flags.StringVar(&o.ScheduleName, "from-schedule", "", "Schedule to restore from") @@ -127,18 +123,18 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.") // this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true" // like a normal bool flag - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" f = flags.VarPF(&o.PreserveNodePorts, "preserve-nodeports", "", "Whether to preserve nodeports of Services when restoring.") // this allows the user to just specify "--preserve-nodeports" as shorthand for "--preserve-nodeports=true" // like a normal bool flag - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the restore.") - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" f = flags.VarPF(&o.AllowPartiallyFailed, "allow-partially-failed", "", "If using --from-schedule, whether to consider PartiallyFailed backups when looking for the most recent one. 
This flag has no effect if not using --from-schedule.") - f.NoOptDefVal = strTrue + f.NoOptDefVal = "true" flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "Wait for the operation to complete.") } diff --git a/pkg/cmd/cli/restore/describe.go b/pkg/cmd/cli/restore/describe.go index e8cbb8d885..88514e8651 100644 --- a/pkg/cmd/cli/restore/describe.go +++ b/pkg/cmd/cli/restore/describe.go @@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { } first := true - for _, restore := range restores.Items { + for i, restore := range restores.Items { opts := newPodVolumeRestoreListOptions(restore.Name) podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts) if err != nil { fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err) } - s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint + s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/schedule/describe.go b/pkg/cmd/cli/schedule/describe.go index 5c64610436..b43cad45fc 100644 --- a/pkg/cmd/cli/schedule/describe.go +++ b/pkg/cmd/cli/schedule/describe.go @@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { } first := true - for _, schedule := range schedules.Items { - s := output.DescribeSchedule(&schedule) //nolint + for i := range schedules.Items { + s := output.DescribeSchedule(&schedules.Items[i]) if first { first = false fmt.Print(s) diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index e66a6275f1..cc1bb8367a 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -939,7 +939,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, return } - for _, backup := range backups.Items { + for i, backup := range backups.Items { if backup.Status.Phase != velerov1api.BackupPhaseInProgress { log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase) continue @@ -948,7 +948,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, updated.Status.Phase = velerov1api.BackupPhaseFailed updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { //nolint + if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName()) continue } @@ -962,7 +962,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, log.WithError(errors.WithStack(err)).Error("failed to list restores") return } - for _, restore := range restores.Items { + for i, restore := range restores.Items { if restore.Status.Phase != velerov1api.RestorePhaseInProgress { log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase) continue @@ -971,7 +971,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, updated.Status.Phase = velerov1api.RestorePhaseFailed 
updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} - if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { //nolint + if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName()) continue } diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 7b188d98b3..76c39c8c84 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -325,11 +325,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque }); err != nil { log.WithError(errors.WithStack(err)).Error("Error listing restore API objects") } else { - for _, restore := range restoreList.Items { + for i, restore := range restoreList.Items { if restore.Spec.BackupName != backup.Name { continue } - restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) //nolint + restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i])) restoreLog.Info("Deleting restore log/results from backup storage") if err := backupStore.DeleteRestore(restore.Name); err != nil { @@ -339,8 +339,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque } restoreLog.Info("Deleting restore referencing backup") - if err := r.Delete(ctx, &restore); err != nil { //nolint - errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) //nolint + if err := r.Delete(ctx, &restoreList.Items[i]); err != nil { + errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error()) } } } @@ -423,11 +423,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")} } var errs []error - for _, dbr := range dbrList.Items { + for i, dbr := range dbrList.Items { if dbr.Name == req.Name { continue } - if err := r.Delete(ctx, &dbr); err != nil { //nolint + if err := r.Delete(ctx, &dbrList.Items[i]); err != nil { errs = append(errs, errors.WithStack(err)) } else { log.Infof("deletion request '%s' removed.", dbr.Name) diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 30ebe3b094..d29a586416 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -317,13 +317,13 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati return } - for _, backup := range backupList.Items { + for i, backup := range backupList.Items { log = log.WithField("backup", backup.Name) if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) { continue } - if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { //nolint + if err := b.client.Delete(ctx, &backupList.Items[i], &client.DeleteOptions{}); err != nil { log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster") } else { log.Debug("Deleted orphaned backup from cluster") @@ -344,10 +344,10 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b if err := 
b.client.List(ctx, &vsList, listOptions); err != nil { log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName) } else { - for _, vs := range vsList.Items { + for i, vs := range vsList.Items { name := kube.NamespaceAndName(vs.GetObjectMeta()) log.Debugf("Deleting volumesnapshot %s", name) - if err := b.client.Delete(context.TODO(), &vs); err != nil { //nolint + if err := b.client.Delete(context.TODO(), &vsList.Items[i]); err != nil { log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name) } } @@ -379,11 +379,13 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList { bslArray = append(bslArray, &inputBSLList.Items[i]) // append everything before the default for _, bsl := range inputBSLList.Items[:i] { - bslArray = append(bslArray, &bsl) //nolint + cpBsl := bsl + bslArray = append(bslArray, &cpBsl) } // append everything after the default for _, bsl := range inputBSLList.Items[i+1:] { - bslArray = append(bslArray, &bsl) //nolint + cpBsl := bsl + bslArray = append(bslArray, &cpBsl) } meta.SetList(resultBSLList, bslArray) diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index 091c20b51b..cae3e88e57 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -61,12 +61,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod return errors.Wrap(err, "failed to list daemonset pods") } - for _, pod := range pods.Items { - if kube.IsPodRunning(&pod) != nil { //nolint + for i := range pods.Items { + if kube.IsPodRunning(&pods.Items[i]) != nil { continue } - if pod.Spec.NodeName == nodeName { + if pods.Items[i].Spec.NodeName == nodeName { return nil } } diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index e6f891cce7..c8121058c6 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -506,7 +506,7 @@ func TestGetRepoPassword(t *testing.T) { }, } - password, err := getRepoPassword(urp.credentialGetter.FromSecret, RepoParam{}) + password, err := getRepoPassword(urp.credentialGetter.FromSecret) require.Equal(t, tc.expected, password) diff --git a/pkg/restore/admissionwebhook_config_action.go b/pkg/restore/admissionwebhook_config_action.go index 7a617b6f82..74fd1b3ff9 100644 --- a/pkg/restore/admissionwebhook_config_action.go +++ b/pkg/restore/admissionwebhook_config_action.go @@ -70,15 +70,15 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA return velero.NewRestoreItemActionExecuteOutput(input.Item), nil } newWebhooks := make([]interface{}, 0) - for i, entry := range webhooks { + for i := range webhooks { logger2 := logger.WithField("index", i) - obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) //nolint + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i]) if err != nil { logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err) continue } s, _, _ := unstructured.NestedString(obj, "sideEffects") - if s != "None" && s != "NoneOnDryRun" { //nolint + if s != "None" && s != "NoneOnDryRun" { logger2.Infof("reset the invalid sideEffects value '%s' to 'None'", s) obj["sideEffects"] = "None" } diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 42f0a35419..73603b059f 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace 
*corev1api.Namespace, client core func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) { var volume *corev1api.Volume - for _, item := range pod.Spec.Volumes { - if item.Name == volumeName { - volume = &item //nolint + for i := range pod.Spec.Volumes { + if pod.Spec.Volumes[i].Name == volumeName { + volume = &pod.Spec.Volumes[i] break } }
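
Reviewer note on the recurring pattern above: nearly every hunk in this patch replaces a pointer taken from a range loop variable (&pvr, &restore, &backup, &dbr, &vs, &bsl, &item) with either a pointer into the backing slice (&list.Items[i]) or an explicit copy, and drops the matching //nolint suppression. Linters flag the original form (gosec G601, implicit memory aliasing in a for loop) because, before Go 1.22, a range loop reuses a single variable across iterations; any pointer to it that outlives the iteration silently ends up referring to the last element visited. The following is a minimal, self-contained sketch of the bug and of both fix idioms used in this patch — illustrative only, not Velero code:

	package main

	import "fmt"

	type Backup struct{ Name string }

	func main() {
		items := []Backup{{Name: "a"}, {Name: "b"}, {Name: "c"}}

		// Buggy pattern (gosec G601): &item aliases the single loop variable,
		// so before Go 1.22 every stored pointer refers to the last element.
		var aliased []*Backup
		for _, item := range items {
			aliased = append(aliased, &item)
		}

		// Fix 1, used where the real object must be patched or deleted
		// (e.g. client.Patch(ctx, &backups.Items[i], ...)): index into the slice.
		var indexed []*Backup
		for i := range items {
			indexed = append(indexed, &items[i])
		}

		// Fix 2, used in backupSyncSourceOrderFunc: take an explicit copy
		// when a snapshot of the value is enough.
		var copied []*Backup
		for _, item := range items {
			cp := item
			copied = append(copied, &cp)
		}

		fmt.Println(aliased[0].Name, aliased[1].Name, aliased[2].Name) // "c c c" before Go 1.22
		fmt.Println(indexed[0].Name, indexed[1].Name, indexed[2].Name) // "a b c"
		fmt.Println(copied[0].Name, copied[1].Name, copied[2].Name)   // "a b c"
	}

Indexing is the right choice wherever the code goes on to mutate or delete the actual API object, since Patch and Delete need the real list element rather than a stale copy; the explicit copy suffices where the value is only read. Either way, fixing the aliasing is preferable to the removed //nolint comments, which only hid the warning.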