diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 73640c7849..b3e4ff4c83 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -20,6 +20,7 @@ import ( "github.com/openshift/machine-config-operator/pkg/version" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/leaderelection" "k8s.io/klog/v2" ) @@ -68,15 +69,29 @@ func runStartCmd(_ *cobra.Command, _ []string) { ctrlctx := ctrlcommon.CreateControllerContext(ctx, cb) + select { + case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved(): + featureGates, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() + if err != nil { + klog.Fatalf("Could not get FG: %w", err) + } else { + klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) + } + case <-time.After(1 * time.Minute): + klog.Fatalf("Could not get FG, timed out: %w", err) + } + // Start the metrics handler go ctrlcommon.StartMetricsListener(startOpts.promMetricsListenAddress, ctrlctx.Stop, ctrlcommon.RegisterMCCMetrics) - controllers := createControllers(ctrlctx) + kubeClient := ctrlctx.ClientBuilder.KubeClientOrDie("machine-config-controller") + controllers := createControllers(ctrlctx, kubeClient) draincontroller := drain.New( drain.DefaultConfig(), ctrlctx.KubeInformerFactory.Core().V1().Nodes(), ctrlctx.ClientBuilder.KubeClientOrDie("node-update-controller"), ctrlctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), + ctrlctx.FeatureGateAccess, ) // Start the shared factory informers that you need to use in your controller @@ -130,9 +145,9 @@ func runStartCmd(_ *cobra.Command, _ []string) { panic("unreachable") } -func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controller { - var controllers []ctrlcommon.Controller +func createControllers(ctx *ctrlcommon.ControllerContext, kubeClient kubernetes.Interface) []ctrlcommon.Controller { + var controllers []ctrlcommon.Controller controllers = append(controllers, // Our primary MCs come from here template.New( diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 34e9e72912..54df712e45 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -5,6 +5,7 @@ import ( "flag" "net/url" "os" + "time" "k8s.io/client-go/tools/clientcmd" @@ -166,16 +167,33 @@ func runStartCmd(_ *cobra.Command, _ []string) { go ctrlcommon.StartMetricsListener(startOpts.promMetricsURL, stopCh, daemon.RegisterMCDMetrics) ctrlctx := ctrlcommon.CreateControllerContext(ctx, cb) + + ctrlctx.ConfigInformerFactory.Start(ctrlctx.Stop) + + select { + case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved(): + featureGates, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() + if err != nil { + klog.Fatalf("Could not get FG: %w", err) + } else { + klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) + } + case <-time.After(1 * time.Minute): + klog.Fatalf("Could not get FG, timed out: %w", err) + } + // create the daemon instance. this also initializes kube client items // which need to come from the container and not the chroot. 
err = dn.ClusterConnect( startOpts.nodeName, kubeClient, + ctrlctx.ClientBuilder.MachineConfigClientOrDie(componentName), ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), ctrlctx.KubeInformerFactory.Core().V1().Nodes(), ctrlctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), startOpts.kubeletHealthzEnabled, startOpts.kubeletHealthzEndpoint, + ctrlctx.FeatureGateAccess, ) if err != nil { klog.Fatalf("Failed to initialize: %v", err) diff --git a/cmd/machine-config-operator/start.go b/cmd/machine-config-operator/start.go index 79d5579f24..7168e67bbe 100644 --- a/cmd/machine-config-operator/start.go +++ b/cmd/machine-config-operator/start.go @@ -4,6 +4,7 @@ import ( "context" "flag" "os" + "time" "github.com/openshift/machine-config-operator/cmd/common" "github.com/openshift/machine-config-operator/internal/clients" @@ -67,6 +68,17 @@ func runStartCmd(_ *cobra.Command, _ []string) { go common.SignalHandler(runCancel) ctrlctx := ctrlcommon.CreateControllerContext(ctx, cb) + select { + case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved(): + featureGates, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() + if err != nil { + klog.Fatalf("Could not get FG: %w", err) + } else { + klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) + } + case <-time.After(1 * time.Minute): + klog.Fatalf("Could not get FG, timed out: %w", err) + } controller := operator.New( ctrlcommon.MCONamespace, componentName, startOpts.imagesFile, @@ -89,6 +101,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { ctrlctx.ClientBuilder.KubeClientOrDie(componentName), ctrlctx.ClientBuilder.APIExtClientOrDie(componentName), ctrlctx.ClientBuilder.ConfigClientOrDie(componentName), + ctrlctx.ClientBuilder.OperatorClientOrDie(componentName), ctrlctx.OpenShiftKubeAPIServerKubeNamespacedInformerFactory.Core().V1().ConfigMaps(), ctrlctx.KubeInformerFactory.Core().V1().Nodes(), ctrlctx.KubeMAOSharedInformer.Core().V1().Secrets(), @@ -97,6 +110,8 @@ func runStartCmd(_ *cobra.Command, _ []string) { ctrlctx.KubeNamespacedInformerFactory.Core().V1().Secrets(), ctrlctx.OpenShiftConfigKubeNamespacedInformerFactory.Core().V1().Secrets(), ctrlctx.ConfigInformerFactory.Config().V1().ClusterOperators(), + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1alpha1().MachineConfigNodes(), + ctrlctx.FeatureGateAccess, ) ctrlctx.NamespacedInformerFactory.Start(ctrlctx.Stop) diff --git a/docs/ContainerRuntimeConfigDesign.md b/docs/ContainerRuntimeConfigDesign.md index ce7270971f..dce9fb80c0 100644 --- a/docs/ContainerRuntimeConfigDesign.md +++ b/docs/ContainerRuntimeConfigDesign.md @@ -68,8 +68,8 @@ e.g. 
```bash $ oc logs -f -n openshift-machine-config-operator machine-config-controller-6fc64d9654-mdtv4 -W0330 08:03:49.665463 1 reflector.go:436] github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go:101: watch of *v1.ContainerRuntimeConfig ended with: an error on the server ("unable to decode an event from the watch stream: unable to decode watch event: v1.ContainerRuntimeConfig.Spec: v1.ContainerRuntimeConfigSpec.MachineConfigPoolSelector: ContainerRuntimeConfig: v1.ContainerRuntimeConfiguration.OverlaySize: unmarshalerDecoder: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$', error found in #10 byte of ...|\":\"9asadG\"},\"machine|..., bigger context ...|:{\"containerRuntimeConfig\":{\"overlaySize\":\"9asadG\"},\"machineConfigPoolSelector\":{\"matchLabels\":{\"cus|...") has prevented the request from succeeding -E0330 08:03:50.810155 1 reflector.go:138] github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go:101: Failed to watch *v1.ContainerRuntimeConfig: failed to list *v1.ContainerRuntimeConfig: v1.ContainerRuntimeConfigList.Items: []v1.ContainerRuntimeConfig: v1.ContainerRuntimeConfig.Spec: v1.ContainerRuntimeConfigSpec.MachineConfigPoolSelector: ContainerRuntimeConfig: v1.ContainerRuntimeConfiguration.OverlaySize: unmarshalerDecoder: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$', error found in #10 byte of ...|":"9asadG"},"machine|..., bigger context ...|:{"containerRuntimeConfig":{"overlaySize":"9asadG"},"machineConfigPoolSelector":{"matchLabels":{"cus|... +W0330 08:03:49.665463 1 reflector.go:436] github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:101: watch of *v1.ContainerRuntimeConfig ended with: an error on the server ("unable to decode an event from the watch stream: unable to decode watch event: v1.ContainerRuntimeConfig.Spec: v1.ContainerRuntimeConfigSpec.MachineConfigPoolSelector: ContainerRuntimeConfig: v1.ContainerRuntimeConfiguration.OverlaySize: unmarshalerDecoder: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$', error found in #10 byte of ...|\":\"9asadG\"},\"machine|..., bigger context ...|:{\"containerRuntimeConfig\":{\"overlaySize\":\"9asadG\"},\"machineConfigPoolSelector\":{\"matchLabels\":{\"cus|...") has prevented the request from succeeding +E0330 08:03:50.810155 1 reflector.go:138] github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:101: Failed to watch *v1.ContainerRuntimeConfig: failed to list *v1.ContainerRuntimeConfig: v1.ContainerRuntimeConfigList.Items: []v1.ContainerRuntimeConfig: v1.ContainerRuntimeConfig.Spec: v1.ContainerRuntimeConfigSpec.MachineConfigPoolSelector: ContainerRuntimeConfig: v1.ContainerRuntimeConfiguration.OverlaySize: unmarshalerDecoder: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$', error found in #10 byte of ...|":"9asadG"},"machine|..., bigger context ...|:{"containerRuntimeConfig":{"overlaySize":"9asadG"},"machineConfigPoolSelector":{"matchLabels":{"cus|... 
``` ## Example diff --git a/go.mod b/go.mod index f00d4afe16..42448277ea 100644 --- a/go.mod +++ b/go.mod @@ -26,11 +26,11 @@ require ( github.com/google/renameio v0.1.0 github.com/imdario/mergo v0.3.13 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20231013202211-096c446e7f60 - github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46 - github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20230516205036-088c6d48cc1a - github.com/openshift/library-go v0.0.0-20231017173800-126f85ed0cc7 - github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b + github.com/openshift/api v0.0.0-20231101131954-24085c95a7a2 + github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba + github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231110142214-403ea8439974 + github.com/openshift/library-go v0.0.0-20231020125034-5a2d9fe760b3 + github.com/openshift/runtime-utils v0.0.0-20220926190846-5c488b20a19f github.com/prometheus/client_golang v1.16.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace @@ -54,12 +54,17 @@ require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect github.com/4meepo/tagalign v1.2.2 // indirect github.com/Abirdcfly/dupword v0.0.11 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect github.com/alexkohler/nakedret/v2 v2.0.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go v1.44.248 // indirect github.com/butuzov/mirror v1.1.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/loads v0.21.2 // indirect @@ -67,11 +72,20 @@ require ( github.com/go-openapi/spec v0.20.9 // indirect github.com/go-openapi/strfmt v0.21.7 // indirect github.com/go-openapi/validate v0.22.1 // indirect + github.com/google/btree v1.0.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/maratori/testableexamples v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/nunnatsa/ginkgolinter v0.12.1 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sigstore/fulcio v1.3.1 // indirect @@ -79,22 +93,25 @@ require ( github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/timonwong/loggercheck v0.9.4 // indirect github.com/xen0n/gosmopolitan v1.2.1 // indirect + github.com/xlab/treeprint v1.2.0 // indirect github.com/ykadowak/zerologlint 
v0.1.2 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.tmz.dev/musttag v0.7.0 // indirect gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect + k8s.io/cli-runtime v0.28.3 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect ) require ( 4d63.com/gochecknoglobals v0.2.1 // indirect github.com/Antonboom/errname v0.1.10 // indirect github.com/Antonboom/nilnil v0.1.5 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect github.com/InVisionApp/go-logger v1.0.1 // indirect - github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect @@ -110,7 +127,6 @@ require ( github.com/breml/errchkjson v0.3.1 // indirect github.com/butuzov/ireturn v0.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chai2010/gettext-go v1.0.2 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect @@ -130,15 +146,13 @@ require ( github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.15.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.8.1 // indirect - github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -165,10 +179,8 @@ require ( github.com/golangci/misspell v0.4.0 // indirect github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/google/btree v1.0.1 // indirect github.com/google/go-containerregistry v0.15.2 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -176,7 +188,6 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect @@ -200,7 +211,6 @@ require ( github.com/ldez/tagliatelle v0.5.0 // 
indirect github.com/leonklingele/grouper v1.1.1 // indirect github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect - github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -213,14 +223,10 @@ require ( github.com/mbilski/exhaustivestruct v1.2.0 // indirect github.com/mgechev/revive v1.3.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect - github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -232,7 +238,6 @@ require ( github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.4.2 // indirect @@ -280,11 +285,9 @@ require ( github.com/ultraware/whitespace v0.0.5 // indirect github.com/uudashr/gocognit v1.0.6 // indirect github.com/vbatts/tar-split v0.11.3 // indirect - github.com/xlab/treeprint v1.2.0 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect - go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect @@ -307,19 +310,18 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.4.3 // indirect k8s.io/apiserver v0.28.3 // indirect - k8s.io/cli-runtime v0.28.3 // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect k8s.io/klog/v2 v2.100.1 k8s.io/kube-aggregator v0.28.3 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f // indirect mvdan.cc/gofumpt v0.5.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) + +replace k8s.io/kube-openapi => github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 diff --git a/go.sum b/go.sum index 6927e8b865..25c67332b0 100644 --- a/go.sum +++ b/go.sum @@ -217,8 +217,8 @@ github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStB github.com/esimonov/ifshort v1.0.4/go.mod 
h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= @@ -470,8 +470,8 @@ github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3 github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -658,16 +658,18 @@ github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/ github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20231013202211-096c446e7f60 h1:vaGyewNUE95Zo+GdK0HEorBlGm3MRa4AC8GUUqF5Ml8= -github.com/openshift/api v0.0.0-20231013202211-096c446e7f60/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= -github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46 h1:J7UsTNgyM1krYnfsmijowYqt5I4mDM1qxNAy4eEa0xc= -github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46/go.mod h1:xM64ClnmCheAmffZZdTSJejy3yPE1nTRWQthKaZQ7JY= -github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20230516205036-088c6d48cc1a h1:y/vIcJvfAKxLlbtU1uCQea/NSAeUUunEJ68lR/wWKC8= -github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20230516205036-088c6d48cc1a/go.mod h1:O4TuBlo2A+kZiykV1LxUSdEjx6zNqgCKf05+lkTDVZc= -github.com/openshift/library-go v0.0.0-20231017173800-126f85ed0cc7 h1:pJLcCSJzdiWCaJ4bAepgnvwMdP33LumbVJyWSW7+3ng= -github.com/openshift/library-go v0.0.0-20231017173800-126f85ed0cc7/go.mod h1:jgxNp8aApJnZtECid9SUSr5Bu6DLo8Hfdv1DgFZaYA8= -github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b 
h1:oXzC1N6E9gw76/WH2gEA8GEHvuq09wuVQ9GoCuR8GF4= -github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b/go.mod h1:l9/qeKZuAmYUMl0yicJlbkPGDsIycGhwxOvOAWyaP0E= +github.com/openshift/api v0.0.0-20231101131954-24085c95a7a2 h1:zGTMgBHP7e5Jm91zM0xfGArBOPR3zVv+m8sE7BWGAnY= +github.com/openshift/api v0.0.0-20231101131954-24085c95a7a2/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= +github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba h1:uZ9gqdJIKUegxqeBqKXbPdd0JfO6aueQ2Ot/gTOhkD8= +github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba/go.mod h1:/BACtJX3fnHOlecTC3VW7JPsJU7KCGaUqt/HkWp5ryo= +github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231110142214-403ea8439974 h1:Pb8KC/DpQUMLHTZalQL90ly9j4Fk2R6wIE8SAHbCf8I= +github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231110142214-403ea8439974/go.mod h1:/JRDiOBCyaQBoD7A0nGsTrPvco69QrGX4OmZGHm6MwM= +github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA= +github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +github.com/openshift/library-go v0.0.0-20231020125034-5a2d9fe760b3 h1:RmbS0Ea69/ejW0HqUoUjO72EfNc/biuVDjODEDl/EnE= +github.com/openshift/library-go v0.0.0-20231020125034-5a2d9fe760b3/go.mod h1:8UzmrBMCn7+GzouL8DVYkL9COBQTB1Ggd13/mHJQCUg= +github.com/openshift/runtime-utils v0.0.0-20220926190846-5c488b20a19f h1:ubRzazPtplWWNWWX07v4ww74S9QL+B2RAxHJ8O00m7o= +github.com/openshift/runtime-utils v0.0.0-20220926190846-5c488b20a19f/go.mod h1:l9/qeKZuAmYUMl0yicJlbkPGDsIycGhwxOvOAWyaP0E= github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -1396,8 +1398,6 @@ k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.28.3 h1:CVbj3+cpshSHR5dWPzLYx3sVpIDEPLlzMSxY/lAc9cM= k8s.io/kube-aggregator v0.28.3/go.mod h1:5DyLevbRTcWnT1f9b+lB3BfbXC1w7gDa/OtB6kKInCw= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k= k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE= k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM= diff --git a/hack/crds-sync.sh b/hack/crds-sync.sh index 50e34bf716..7584d0a6c2 100755 --- a/hack/crds-sync.sh +++ b/hack/crds-sync.sh @@ -1,4 +1,4 @@ - #!/usr/bin/env bash +#!/usr/bin/env bash set -euo pipefail @@ -6,7 +6,8 @@ set -euo pipefail CRDS_MAPPING=( "v1/0000_80_containerruntimeconfig.crd.yaml:0000_80_machine-config-operator_01_containerruntimeconfig.crd.yaml" "v1/0000_80_kubeletconfig.crd.yaml:0000_80_machine-config-operator_01_kubeletconfig.crd.yaml" "v1/0000_80_machineconfig.crd.yaml:0000_80_machine-config-operator_01_machineconfig.crd.yaml" - "v1/0000_80_machineconfigpool.crd.yaml:0000_80_machine-config-operator_01_machineconfigpool.crd.yaml" ) + "v1/0000_80_machineconfigpool.crd.yaml:0000_80_machine-config-operator_01_machineconfigpool.crd.yaml" + 
"v1alpha1/0000_80_machineconfignode-TechPreviewNoUpgrade.crd.yaml:0000_80_machine-config-operator_01_machineconfignode-TechPreviewNoUpgrade.crd.yaml" ) #TODO(jkyros): 0000_80_machine-config-operator_02_containerruntimeconfig.crd.yaml) for crd in "${CRDS_MAPPING[@]}" ; do @@ -17,3 +18,17 @@ done #this one goes in manifests rather than install, but should it? cp "vendor/github.com/openshift/api/machineconfiguration/v1/0000_80_controllerconfig.crd.yaml" "manifests/controllerconfig.crd.yaml" +cp "vendor/github.com/openshift/api/operator/v1/0000_80_machine-config-operator_01_config.crd.yaml" "install/0000_80_machine-config-operator_01_config.crd.yaml" + + +#v1/0000_10_containerruntimeconfig.crd.yaml:0000_80_machine-config-operator_01_containerruntimeconfig.crd.yaml +#v1/0000_10_kubeletconfig.crd.yaml:0000_80_machine-config-operator_01_kubeletconfig.crd.yaml +#v1/0000_10_machineconfig.crd.yaml:0000_80_machine-config-operator_01_machineconfig.crd.yaml +#v1/0000_10_machineconfigpool.crd.yaml:0000_80_machine-config-operator_01_machineconfigpool.crd.yaml +#TODO(jkyros): 0000_80_machine-config-operator_02_containerruntimeconfig.crd.yaml + +#./vendor/github.com/openshift/api/machineconfiguration/ +#./vendor/github.com/openshift/api/machineconfiguration/ +#./vendor/github.com/openshift/api/machineconfiguration/ +#./vendor/github.com/openshift/api/machineconfiguration/ +#./vendor/github.com/openshift/api/machineconfiguration/v1/ diff --git a/install/0000_80_machine-config-operator_01_config.crd.yaml b/install/0000_80_machine-config-operator_01_config.crd.yaml new file mode 100644 index 0000000000..61ef7740e7 --- /dev/null +++ b/install/0000_80_machine-config-operator_01_config.crd.yaml @@ -0,0 +1,192 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "MachineConfiguration provides information to configure an operator to manage Machine Configuration. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Machine Config Operator + type: object + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + type: integer + format: int32 + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + type: integer + format: int32 + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status is the most recently observed status of the Machine Config Operator + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + type: integer + format: int32 + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + type: array + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. + type: object + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + type: integer + format: int32 + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + type: integer + format: int32 + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + type: array + items: + type: string + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + type: string + format: date-time + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + type: integer + format: int32 + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/install/0000_80_machine-config-operator_01_machineconfignode-TechPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config-operator_01_machineconfignode-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..0ed2ed8991 --- /dev/null +++ b/install/0000_80_machine-config-operator_01_machineconfignode-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,203 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: machineconfignodes.machineconfiguration.openshift.io + labels: + "openshift.io/operator-managed": "" + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1596 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade +spec: + # group name to use for REST API: /apis// + group: machineconfiguration.openshift.io + scope: Cluster + names: + kind: MachineConfigNode + singular: machineconfignode + plural: machineconfignodes + versions: + - name: v1alpha1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + type: string + - jsonPath: .status.conditions[?(@.type=="Resumed")].status + name: Resumed + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateCompatible")].status + name: UpdateCompatible + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="CordonedNode")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="DrainedNode")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ReloadedCRIO")].status + name: ReloadedCRIO + priority: 1 + type: string + schema: + openAPIV3Schema: + description: 'MachineConfigNode describes the health of the Machines on the system Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.' + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine config node. + type: object + required: + - configVersion + - node + - pool + properties: + configVersion: + description: configVersion holds the desired config version for the node targeted by this machine config node resource. The desired version represents the machine config the node will attempt to update to. This gets set before the machine config operator validates the new machine config against the current machine config. + type: object + required: + - desired + properties: + desired: + description: desired is the name of the machine config that the the node should be upgraded to. This value is set when the machine config pool generates a new version of its rendered configuration. When this value is changed, the machine config daemon starts the node upgrade process. This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. 
Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + node: + description: node contains a reference to the node for this machine config node. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + pool: + description: pool contains a reference to the machine config pool that this machine config node's referenced node belongs to. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + status: + description: status describes the last observed state of this machine config node. + type: object + required: + - configVersion + properties: + conditions: + description: conditions represent the observations of a machine config node's current state. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. 
Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + configVersion: + description: configVersion describes the current and desired machine config for this node. The current version represents the current machine config for the node and is updated after a successful update. The desired version represents the machine config the node will attempt to update to. This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists. + type: object + required: + - desired + properties: + current: + description: current is the name of the machine config currently in use on the node. This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + desired: + description: desired is the MachineConfig the node wants to upgrade to. This value gets set in the machine config node status once the machine config has been validated against the current machine config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + observedGeneration: + description: observedGeneration represents the generation observed by the controller. This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec. 
+ type: integer + format: int64 + x-kubernetes-validations: + - rule: self.metadata.name == self.spec.node.name + message: spec.node.name should match metadata.name diff --git a/lib/resourceapply/machineconfig.go b/lib/resourceapply/machineconfig.go index db3d7ceda8..ae83f184fb 100644 --- a/lib/resourceapply/machineconfig.go +++ b/lib/resourceapply/machineconfig.go @@ -4,7 +4,11 @@ import ( "context" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgclientv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" + mcfgclientalphav1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" mcoResourceMerge "github.com/openshift/machine-config-operator/lib/resourcemerge" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -49,10 +53,34 @@ func ApplyMachineConfigPool(client mcfgclientv1.MachineConfigPoolsGetter, requir return existing, false, nil } + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + existing.SetAnnotations(newAnnos) actual, err := client.MachineConfigPools().Update(context.TODO(), existing, metav1.UpdateOptions{}) return actual, true, err } +// ApplyMachineConfigNode applies the required machineconfignode to the cluster. +func ApplyMachineConfigNode(client mcfgclientalphav1.MachineConfigNodesGetter, required *mcfgalphav1.MachineConfigNode) (*mcfgalphav1.MachineConfigNode, bool, error) { + existing, err := client.MachineConfigNodes().Get(context.TODO(), required.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.MachineConfigNodes().Create(context.TODO(), required, metav1.CreateOptions{}) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + mcoResourceMerge.EnsureMachineConfigNode(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.MachineConfigNodes().Update(context.TODO(), existing, metav1.UpdateOptions{}) + return actual, true, err +} + // ApplyControllerConfig applies the required machineconfig to the cluster. func ApplyControllerConfig(client mcfgclientv1.ControllerConfigsGetter, required *mcfgv1.ControllerConfig) (*mcfgv1.ControllerConfig, bool, error) { existing, err := client.ControllerConfigs().Get(context.TODO(), required.GetName(), metav1.GetOptions{}) diff --git a/lib/resourcemerge/machineconfig.go b/lib/resourcemerge/machineconfig.go index 125b27daff..2b7875ca3b 100644 --- a/lib/resourcemerge/machineconfig.go +++ b/lib/resourcemerge/machineconfig.go @@ -2,10 +2,19 @@ package resourcemerge import ( mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" "k8s.io/apimachinery/pkg/api/equality" ) +// EnsureMachineConfig ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
+func EnsureMachineConfigNode(modified *bool, existing *mcfgalphav1.MachineConfigNode, required mcfgalphav1.MachineConfigNode) { + resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + ensureMachineConfigNodeSpec(modified, &existing.Spec, required.Spec) +} + // EnsureMachineConfig ensures that the existing matches the required. // modified is set to true when existing had to be updated with required. func EnsureMachineConfig(modified *bool, existing *mcfgv1.MachineConfig, required mcfgv1.MachineConfig) { @@ -44,6 +53,16 @@ func EnsureMachineConfigPool(modified *bool, existing *mcfgv1.MachineConfigPool, } } +func ensureMachineConfigNodeSpec(modified *bool, existing *mcfgalphav1.MachineConfigNodeSpec, required mcfgalphav1.MachineConfigNodeSpec) { + if !equality.Semantic.DeepEqual(existing.Node, required.Node) { + *modified = true + (*existing).Node = required.Node + } + if !equality.Semantic.DeepEqual(existing.Pool, required.Pool) { + *modified = true + (*existing).Pool = required.Pool + } +} func ensureMachineConfigSpec(modified *bool, existing *mcfgv1.MachineConfigSpec, required mcfgv1.MachineConfigSpec) { resourcemerge.SetStringIfSet(modified, &existing.OSImageURL, required.OSImageURL) resourcemerge.SetStringIfSet(modified, &existing.KernelType, required.KernelType) diff --git a/lib/resourceread/machineconfig.go b/lib/resourceread/machineconfig.go index 1203b65ec6..0427d20676 100644 --- a/lib/resourceread/machineconfig.go +++ b/lib/resourceread/machineconfig.go @@ -5,6 +5,10 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + opv1 "github.com/openshift/api/operator/v1" + + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" ) @@ -12,12 +16,24 @@ import ( var ( mcfgScheme = runtime.NewScheme() mcfgCodecs = serializer.NewCodecFactory(mcfgScheme) + + mcfgAlphaScheme = runtime.NewScheme() + mcfgAlphaCodecs = serializer.NewCodecFactory(mcfgAlphaScheme) + + opv1Scheme = runtime.NewScheme() + opv1Codec = serializer.NewCodecFactory(opv1Scheme) ) func init() { + if err := mcfgalphav1.AddToScheme(mcfgAlphaScheme); err != nil { + panic(err) + } if err := mcfgv1.AddToScheme(mcfgScheme); err != nil { panic(err) } + if err := opv1.AddToScheme(opv1Scheme); err != nil { + panic(err) + } } // ReadMachineConfigV1 reads raw MachineConfig object from bytes. Returns MachineConfig and error. @@ -60,6 +76,15 @@ func ReadMachineConfigPoolV1OrDie(objBytes []byte) *mcfgv1.MachineConfigPool { return requiredObj.(*mcfgv1.MachineConfigPool) } +// ReadMachineConfigPoolV1OrDie reads MachineConfigPool object from bytes. Panics on error. +func ReadMachineConfigNodeV1OrDie(objBytes []byte) *mcfgalphav1.MachineConfigNode { + requiredObj, err := runtime.Decode(mcfgAlphaCodecs.UniversalDecoder(mcfgalphav1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*mcfgalphav1.MachineConfigNode) +} + // ReadControllerConfigV1OrDie reads ControllerConfig object from bytes. Panics on error. 
func ReadControllerConfigV1OrDie(objBytes []byte) *mcfgv1.ControllerConfig { requiredObj, err := runtime.Decode(mcfgCodecs.UniversalDecoder(mcfgv1.SchemeGroupVersion), objBytes) @@ -68,3 +93,11 @@ func ReadControllerConfigV1OrDie(objBytes []byte) *mcfgv1.ControllerConfig { } return requiredObj.(*mcfgv1.ControllerConfig) } + +func ReadMachineConfigurationV1OrDie(objBytes []byte) *opv1.MachineConfiguration { + requiredObj, err := runtime.Decode(opv1Codec.UniversalDecoder(opv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*opv1.MachineConfiguration) +} diff --git a/manifests/default.machineconfiguration.yaml b/manifests/default.machineconfiguration.yaml new file mode 100644 index 0000000000..9a0719a197 --- /dev/null +++ b/manifests/default.machineconfiguration.yaml @@ -0,0 +1,8 @@ +apiVersion: operator.openshift.io/v1 +kind: MachineConfiguration +metadata: + name: default + labels: +spec: + mode: in-cluster + managementState: Managed \ No newline at end of file diff --git a/manifests/machineconfigdaemon/clusterrole.yaml b/manifests/machineconfigdaemon/clusterrole.yaml index a734c52793..2d176d0d3c 100644 --- a/manifests/machineconfigdaemon/clusterrole.yaml +++ b/manifests/machineconfigdaemon/clusterrole.yaml @@ -6,9 +6,15 @@ rules: - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch"] +- apiGroups: ["config.openshift.io"] + resources: ["clusterversions", "featuregates"] + verbs: ["*"] - apiGroups: ["machineconfiguration.openshift.io"] resources: ["machineconfigs", "controllerconfigs"] verbs: ["get", "list", "watch"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineconfignodes", "machineconfignodes/status"] + verbs: ["create", "update", "patch"] - apiGroups: ["security.openshift.io"] resourceNames: ["privileged"] resources: ["securitycontextconstraints"] diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index a81a076b9e..7b53244bc2 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -4,12 +4,22 @@ import ( "time" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" ) // constants defines some file paths that are shared outside of the // MCO package; and thus consumed by other users +type QueuedEvent struct { + Time metav1.Time `json:"time"` + Type string `json:"type"` + Annotations map[string]string `json:"annotations"` + EventType string `json:"eventType"` + Reason string `json:"reason"` + Message string `json:"message"` +} + const ( // APIServerURLFile is the path to the apiserver url environment file. 
// See templates/master/00-master/_base/files/apiserver-url-env.yaml diff --git a/pkg/controller/bootstrap/bootstrap.go b/pkg/controller/bootstrap/bootstrap.go index c432cdd949..5a61543860 100644 --- a/pkg/controller/bootstrap/bootstrap.go +++ b/pkg/controller/bootstrap/bootstrap.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" @@ -75,6 +76,7 @@ func (b *Bootstrap) Run(destDir string) error { codecFactory := serializer.NewCodecFactory(scheme) decoder := codecFactory.UniversalDecoder(mcfgv1.GroupVersion, apioperatorsv1alpha1.GroupVersion, apicfgv1.GroupVersion) + var states []*mcfgalphav1.MachineConfigNode var cconfig *mcfgv1.ControllerConfig var featureGate *apicfgv1.FeatureGate var nodeConfig *apicfgv1.Node @@ -114,6 +116,8 @@ func (b *Bootstrap) Run(destDir string) error { } switch obj := obji.(type) { + case *mcfgalphav1.MachineConfigNode: + states = append(states, obj) case *mcfgv1.MachineConfigPool: pools = append(pools, obj) case *mcfgv1.MachineConfig: diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index 7fb9ff1625..a258e29741 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -10,8 +10,6 @@ import ( "github.com/containers/image/v5/docker/reference" buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" - "github.com/openshift/machine-config-operator/pkg/apihelpers" corev1 "k8s.io/api/core/v1" aggerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -25,12 +23,14 @@ import ( "k8s.io/klog/v2" buildinformers "github.com/openshift/client-go/build/informers/externalversions" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" buildinformersv1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" buildclientset "github.com/openshift/client-go/build/clientset/versioned" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" @@ -39,6 +39,7 @@ import ( coreinformers "k8s.io/client-go/informers" coreinformersv1 "k8s.io/client-go/informers/core/v1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1133,7 +1134,10 @@ func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) erro func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err)) apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller" + 
pool.SetAnnotations(newAnnos) + if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) } return err diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go index 1ed512b325..ba0525647b 100644 --- a/pkg/controller/build/build_controller_test.go +++ b/pkg/controller/build/build_controller_test.go @@ -15,7 +15,6 @@ import ( mcfgv1 "github.com/openshift/api/machineconfiguration/v1" fakeclientbuildv1 "github.com/openshift/client-go/build/clientset/versioned/fake" fakeclientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" - "github.com/openshift/machine-config-operator/pkg/apihelpers" testhelpers "github.com/openshift/machine-config-operator/test/helpers" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakecorev1client "k8s.io/client-go/kubernetes/fake" @@ -23,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" k8serrors "k8s.io/apimachinery/pkg/api/errors" diff --git a/pkg/controller/common/controller_context.go b/pkg/controller/common/controller_context.go index ea520ed094..9226024394 100644 --- a/pkg/controller/common/controller_context.go +++ b/pkg/controller/common/controller_context.go @@ -111,7 +111,7 @@ func CreateControllerContext(ctx context.Context, cb *clients.Builder) *Controll klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) } - recorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(MCONamespace), "cloud-controller-manager-operator", controllerRef) + recorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(MCONamespace), "machine-config-operator", controllerRef) // By default, this will exit(0) the process if the featuregates ever change to a different set of values. featureGateAccessor := featuregates.NewFeatureGateAccess( @@ -119,6 +119,7 @@ func CreateControllerContext(ctx context.Context, cb *clients.Builder) *Controll configSharedInformer.Config().V1().ClusterVersions(), configSharedInformer.Config().V1().FeatureGates(), recorder, ) + go featureGateAccessor.Run(ctx) return &ControllerContext{ diff --git a/pkg/controller/drain/drain_controller.go b/pkg/controller/drain/drain_controller.go index f75fedb3c4..8fca5f67c4 100644 --- a/pkg/controller/drain/drain_controller.go +++ b/pkg/controller/drain/drain_controller.go @@ -9,10 +9,13 @@ import ( "strings" "time" + "github.com/openshift/api/machineconfiguration/v1alpha1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/upgrademonitor" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -85,9 +88,11 @@ func DefaultConfig() Config { // Controller defines the node controller. 
type Controller struct { - client mcfgclientset.Interface - kubeClient clientset.Interface - eventRecorder record.EventRecorder + client mcfgclientset.Interface + kubeClient clientset.Interface + eventRecorder record.EventRecorder + healthEventsRecorder record.EventRecorder + updateEventsRecorder record.EventRecorder syncHandler func(node string) error enqueueNode func(*corev1.Node) @@ -99,6 +104,8 @@ type Controller struct { ongoingDrains map[string]time.Time cfg Config + + featureGatesAccessor featuregates.FeatureGateAccess } // New returns a new node controller. @@ -107,17 +114,19 @@ func New( nodeInformer coreinformersv1.NodeInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, + fgAccessor featuregates.FeatureGateAccess, ) *Controller { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) ctrl := &Controller{ - client: mcfgClient, - kubeClient: kubeClient, - eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-nodecontroller"})), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-nodecontroller"), - cfg: cfg, + client: mcfgClient, + kubeClient: kubeClient, + eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-nodecontroller"})), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-nodecontroller"), + cfg: cfg, + featureGatesAccessor: fgAccessor, } nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -162,6 +171,7 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { for i := 0; i < workers; i++ { go wait.Until(ctrl.worker, ctrl.cfg.WaitUntil, stopCh) + } <-stopCh @@ -303,12 +313,17 @@ func (ctrl *Controller) syncNode(key string) error { desiredVerb := strings.Split(desiredState, "-")[0] switch desiredVerb { case daemonconsts.DrainerStateUncordon: + + upgrademonitor.GenerateAndApplyMachineConfigNodes(&upgrademonitor.Condition{State: v1alpha1.MachineConfigNodeUpdateExecuted, Reason: string(v1alpha1.MachineConfigNodeUpdateCordoned), Message: fmt.Sprintf("(Un)Cordoning Node as part of In progress upgrade phase")}, + &upgrademonitor.Condition{State: v1alpha1.MachineConfigNodeUpdateCordoned, Reason: fmt.Sprintf("%s%s", string(v1alpha1.MachineConfigNodeUpdateExecuted), string(v1alpha1.MachineConfigNodeUpdateCordoned)), Message: fmt.Sprintf("(Un)Cordoning node. 
This process will not be complete until the node's unschedulability = %t", false)}, metav1.ConditionUnknown, metav1.ConditionUnknown, node, ctrl.client, ctrl.featureGatesAccessor) + // ctrl.EmitUpgradeEvent(ctrl.stateControllerPod, ctrl.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateComplete, node), corev1.EventTypeNormal, "CordoningNode", fmt.Sprintf("Cordoning Node %s as part of update", node.Name)) ctrl.logNode(node, "uncordoning") // perform uncordon if err := ctrl.cordonOrUncordonNode(false, node, drainer); err != nil { return fmt.Errorf("failed to uncordon node %v: %w", node.Name, err) } case daemonconsts.DrainerStateDrain: + //ctrl.EmitUpgradeEvent(ctrl.stateControllerPod, ctrl.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateComplete, node), corev1.EventTypeNormal, "DrainingNode", fmt.Sprintf("Draining Node %s as part of update", node.Name)) if err := ctrl.drainNode(node, drainer); err != nil { // If we get an error from drainNode, that means the drain failed. // However, we want to requeue and try again. So we need to return nil @@ -465,7 +480,14 @@ func (ctrl *Controller) cordonOrUncordonNode(desired bool, node *corev1.Node, dr ctrl.logNode(node, "RunCordonOrUncordon() succeeded but node is still not in %s state, retrying", verb) return false, nil } - + upgrademonitor.GenerateAndApplyMachineConfigNodes(&upgrademonitor.Condition{State: v1alpha1.MachineConfigNodeUpdateExecuted, Reason: string(v1alpha1.MachineConfigNodeUpdateCordoned), Message: fmt.Sprintf("(Un)Cordoned Node as part of In progress upgrade phase")}, + &upgrademonitor.Condition{State: v1alpha1.MachineConfigNodeUpdateCordoned, Reason: fmt.Sprintf("%s%s", string(v1alpha1.MachineConfigNodeUpdateExecuted), string(v1alpha1.MachineConfigNodeUpdateCordoned)), Message: fmt.Sprintf("(Un)Cordoned node.
The node is reporting Unschedulable = %t", node.Spec.Unschedulable)}, + metav1.ConditionUnknown, + metav1.ConditionTrue, + node, + ctrl.client, + ctrl.featureGatesAccessor, + ) ctrl.logNode(node, "%s succeeded (currently schedulable: %t)", verb, !updatedNode.Spec.Unschedulable) return true, nil }); err != nil { diff --git a/pkg/controller/kubelet-config/kubelet_config_controller.go b/pkg/controller/kubelet-config/kubelet_config_controller.go index c8a8e3b1b8..c4a2366e99 100644 --- a/pkg/controller/kubelet-config/kubelet_config_controller.go +++ b/pkg/controller/kubelet-config/kubelet_config_controller.go @@ -21,6 +21,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" @@ -77,8 +78,10 @@ var errCouldNotFindMCPSet = errors.New("could not find any MachineConfigPool set type Controller struct { templatesDir string - client mcfgclientset.Interface - configClient configclientset.Interface + client mcfgclientset.Interface + kubeClient kubernetes.Interface + configClient configclientset.Interface + eventRecorder record.EventRecorder syncHandler func(mcp string) error @@ -131,6 +134,7 @@ func New( templatesDir: templatesDir, client: mcfgClient, configClient: configclient, + kubeClient: kubeClient, eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-kubeletconfigcontroller"})), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-kubeletconfigcontroller"), featureQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-featurecontroller"), @@ -204,6 +208,7 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { for i := 0; i < workers; i++ { go wait.Until(ctrl.nodeConfigWorker, time.Second, stopCh) + } <-stopCh @@ -426,6 +431,9 @@ func (ctrl *Controller) syncStatusOnly(cfg *mcfgv1.KubeletConfig, err error, arg if statusUpdateError != nil { klog.Warningf("error updating kubeletconfig status: %v", statusUpdateError) } + if err != nil { + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(cfg.Name, string(v1.KC), v1.MachineConfigNodeErrored), corev1.EventTypeWarning, "KubeletSyncError", fmt.Sprintf("Error Syncing KubeletConfig: %s", err.Error())) + } return err } @@ -473,6 +481,7 @@ func (ctrl *Controller) syncKubeletConfig(key string) error { klog.V(4).Infof("Finished syncing kubeletconfig %q (%v)", key, time.Since(startTime)) }() + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(ctrlcommon.ControllerConfigName, string(v1.CC), v1.MCCSync), corev1.EventTypeNormal, "CheckingControllerConfig", "Checking if ControllerConfig is complete") // Wait to apply a kubelet config if the controller config is not completed if err := apihelpers.IsControllerConfigCompleted(ctrlcommon.ControllerConfigName, ctrl.ccLister.Get); err != nil { return err @@ -493,6 +502,8 @@ func (ctrl *Controller) syncKubeletConfig(key string) error { return err } + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(name, string(v1.KC), v1.MCCSync), corev1.EventTypeNormal, "GotKubeletConfig", "Got KubeletConfig to Sync") + // Deep-copy otherwise we are mutating our cache. 
cfg = cfg.DeepCopy() @@ -515,6 +526,7 @@ func (ctrl *Controller) syncKubeletConfig(key string) error { } // Find all MachineConfigPools + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(cfg.Name, string(v1.KC), v1.MCCSync), corev1.EventTypeNormal, "SyncingKubeletStatus", "Syncing KubeletConfigStatus") mcpPools, err := ctrl.getPoolsForKubeletConfig(cfg) if err != nil { return ctrl.syncStatusOnly(cfg, err) @@ -533,6 +545,7 @@ func (ctrl *Controller) syncKubeletConfig(key string) error { } for _, pool := range mcpPools { + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(cfg.Name, string(v1.KC), v1.MCCSync), corev1.EventTypeNormal, "ApplyingConfigsForKubelet", fmt.Sprintf("Applying Kubelet MCs associated with Pool %s", pool.Name)) if pool.Spec.Configuration.Name == "" { updateDelay := 5 * time.Second // Previously we spammed the logs about empty pools. @@ -585,6 +598,8 @@ func (ctrl *Controller) syncKubeletConfig(key string) error { originalKubeConfig.TLSMinVersion = observedMinTLSVersion originalKubeConfig.TLSCipherSuites = observedCipherSuites + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(cfg.Name, string(v1.KC), v1.MCCSync), corev1.EventTypeNormal, "GeneratingIgnForKubelet", "Generating Ignition for OriginalKubeletConfig") + kubeletIgnition, logLevelIgnition, autoSizingReservedIgnition, err := generateKubeletIgnFiles(cfg, originalKubeConfig) if err != nil { return ctrl.syncStatusOnly(cfg, err) diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 611394ba42..419a809246 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -876,6 +876,25 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return err } + // make this a function + // update all nodes for pool with pool prog. + nodes, err := ctrl.getNodesForPool(machineconfigpool) + if err != nil { + return err + } + /* + annos := make(map[string]string) + annos["ms"] = "ControllerState" + annos["state"] = "StateControllerSyncController" + annos["ObjectKind"] = string(mcfgv1.MCP) + annos["ObjectName"] = machineconfigpool.Name + + s, err := state.StateControllerPod(ctrl.kubeClient) + if err != nil { + klog.Error(err) + } + */ + //ctrl.EmitHealthEvent(s, annos, corev1.EventTypeNormal, "GotMachineConfigPool", fmt.Sprintf("Got Machine Config Pool %s", key)) if machineconfigpool.Spec.Configuration.Name == "" { // Previously we spammed the logs about empty pools. 
// Let's just pause for a bit here to let the renderer @@ -924,7 +943,8 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { klog.V(4).Infof("Pool %s is not layered", pool.Name) } - nodes, err := ctrl.getNodesForPool(pool) + //ctrl.EmitHealthEvent(s, annos, corev1.EventTypeNormal, "SyncingPoolStatus", fmt.Sprintf("Syncing MachineConfigPool Status %s", key)) + nodes, err = ctrl.getNodesForPool(pool) if err != nil { if syncErr := ctrl.syncStatusOnly(pool); syncErr != nil { errs := kubeErrs.NewAggregate([]error{syncErr, err}) @@ -970,6 +990,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } } } + //ctrl.EmitHealthEvent(s, annos, corev1.EventTypeNormal, "SyncingPoolMachines", fmt.Sprintf("Syncing MachineConfigPool's Machine with the proper Config Annotations %s", key)) candidates, capacity := getAllCandidateMachines(pool, nodes, maxunavail) if len(candidates) > 0 { zones := make(map[string]bool) diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go index 785ec28a0b..5cf76e9aba 100644 --- a/pkg/controller/node/node_controller_test.go +++ b/pkg/controller/node/node_controller_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -1140,7 +1142,7 @@ func TestShouldMakeProgress(t *testing.T) { } else { t.Logf("not expecting annotation") } - expStatus := calculateStatus(cc, mcp, nodes) + expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1192,7 +1194,7 @@ func TestPaused(t *testing.T) { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - expStatus := calculateStatus(cc, mcp, nodes) + expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1220,7 +1222,7 @@ func TestShouldUpdateStatusOnlyUpdated(t *testing.T) { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - expStatus := calculateStatus(cc, mcp, nodes) + expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1248,7 +1250,7 @@ func TestShouldUpdateStatusOnlyNoProgress(t *testing.T) { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - expStatus := calculateStatus(cc, mcp, nodes) + expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1281,7 +1283,7 @@ func TestCertStatus(t *testing.T) { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - expStatus := calculateStatus(cc, mcp, nodes) + expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus @@ -1301,7 +1303,7 @@ func TestShouldDoNothing(t *testing.T) { newNodeWithLabel("node-0", machineConfigV1, machineConfigV1, map[string]string{"node-role/worker": "", "node-role/infra": ""}), newNodeWithLabel("node-1", machineConfigV1, machineConfigV1, map[string]string{"node-role/worker": "", "node-role/infra": ""}), } - status := calculateStatus(cc, mcp, nodes) + status := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) mcp.Status = status 
f.ccLister = append(f.ccLister, cc) @@ -1392,7 +1394,7 @@ func TestControlPlaneTopology(t *testing.T) { for _, node := range nodes { addNodeAnnotations(node, annotations) } - status := calculateStatus(cc, mcp, nodes) + status := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) mcp.Status = status f.ccLister = append(f.ccLister, cc) diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 7c79e4b9e6..373689924a 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -5,6 +5,9 @@ import ( "fmt" "strings" + "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" v1 "github.com/openshift/api/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/apihelpers" @@ -17,6 +20,7 @@ import ( "k8s.io/klog/v2" ) +// syncStatusOnly for MachineConfigNode func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { cc, err := ctrl.ccLister.Get(ctrlcommon.ControllerConfigName) if err != nil { @@ -27,14 +31,28 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { return err } - newStatus := calculateStatus(cc, pool, nodes) + machineConfigStates := []*mcfgalphav1.MachineConfigNode{} + for _, node := range nodes { + ms, err := ctrl.client.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Could not find our MachineConfigNode fornode. %s: %w", node.Name, err) + continue + } + machineConfigStates = append(machineConfigStates, ms) + } + newStatus := calculateStatus(machineConfigStates, cc, pool, nodes) if equality.Semantic.DeepEqual(pool.Status, newStatus) { return nil } newPool := pool newPool.Status = newStatus - _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + pool.SetAnnotations(newAnnos) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), newPool, metav1.UpdateOptions{}); err != nil { + return err + } if pool.Spec.Configuration.Name != newPool.Spec.Configuration.Name { ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool)) } @@ -44,7 +62,7 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { return err } -func calculateStatus(cconfig *v1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { +func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *v1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { certExpirys := []v1.CertExpiry{} if cconfig != nil { for _, cert := range cconfig.Status.ControllerCertificates { @@ -60,16 +78,117 @@ func calculateStatus(cconfig *v1.ControllerConfig, pool *mcfgv1.MachineConfigPoo } machineCount := int32(len(nodes)) - updatedMachines := getUpdatedMachines(pool, nodes) + // modify this to use state controller data somehow + // look for update errors to get degraded machines + // updated means the most recent condition is updated in the state controller + // unavailable? 
I guess this means everything in the process of working through an upgrade of some kind or having some sort of day-to-day MCD progress + // is unavailable + + // instead of basing everything on nodes (which we don't own) base it on pool state. + // however, we can't change the whole updated,ready,unavailable machine logic too much besides cordoning + + // if each machinestate (upgrading) is per pool, we need to not have just a node assoc with each MS but somehow a node attached + // to the progression + /* + updatedMachines := getUpdatedMachines(pool.Spec.Configuration.Name, nodes) + updatedMachineCount := int32(len(updatedMachines)) + + readyMachines := getReadyMachines(pool.Spec.Configuration.Name, nodes) + readyMachineCount := int32(len(readyMachines)) + + unavailableMachines := getUnavailableMachines(nodes) + unavailableMachineCount := int32(len(unavailableMachines)) + + degradedMachines := getDegradedMachines(nodes) + degradedReasons := []string{} + for _, n := range degradedMachines { + reason, ok := n.Annotations[daemonconsts.MachineConfigDaemonReasonAnnotationKey] + if ok && reason != "" { + degradedReasons = append(degradedReasons, fmt.Sprintf("Node %s is reporting: %q", n.Name, reason)) + } + } + */ + // in the event you are upgrading between versions, the statecontroller is going to get confused and it seems to depend on + // the operator pod being rolled out + + var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node + // if we represent updating properly here, we will also represent updating properly in the CO + // so this solves the cordoning RFE and the upgradeable RFE + // updating == updatePrepared, updateExecuted, updatedComplete + // updated == nodeResumed, updated + // ready == nodeResumed, updated + // unavailable == draining, cordoned + // degraded == if the condition.Reason == error + // this ensures that a MCP only enters Upgradeable==False if the node actually needs to upgrade to the new MC + for _, state := range mcs { + var ourNode *corev1.Node + for _, n := range nodes { + if state.Name == n.Name { + ourNode = n + break + } + } + if ourNode == nil { + klog.Errorf("Could not find specified node %s", state.Name) + continue + } + if len(state.Status.Conditions) == 0 { + // not ready yet + break + } + for _, cond := range state.Status.Conditions { + if cond.Reason == "Error" { + degradedMachines = append(degradedMachines, ourNode) + continue + } + if cond.Status == metav1.ConditionUnknown { + switch v1alpha1.StateProgress(cond.Type) { + case mcfgalphav1.MachineConfigNodeUpdatePrepared: + updatingMachines = append(updatingMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdateExecuted: + updatingMachines = append(updatingMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdatePostActionComplete: + updatingMachines = append(updatingMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdateComplete: + updatingMachines = append(updatingMachines, ourNode) + case mcfgalphav1.MachineConfigNodeResumed: + updatedMachines = append(updatedMachines, ourNode) + readyMachines = append(readyMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdateCompatible: + updatedMachines = append(updatedMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdateDrained: + unavailableMachines = append(unavailableMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdateCordoned: + unavailableMachines = append(unavailableMachines, ourNode) + case mcfgalphav1.MachineConfigNodeUpdated: + updatedMachines = append(updatedMachines,
ourNode) + readyMachines = append(readyMachines, ourNode) + } + } + } + } + degradedMachineCount := int32(len(degradedMachines)) updatedMachineCount := int32(len(updatedMachines)) - - readyMachines := getReadyMachines(pool, nodes) + unavailableMachineCount := int32(len(unavailableMachines)) + updatingMachineCount := int32(len(updatingMachines)) readyMachineCount := int32(len(readyMachines)) - unavailableMachines := getUnavailableMachines(nodes, pool) - unavailableMachineCount := int32(len(unavailableMachines)) + // this is # 1 priority, get the upgrade states actually reporting + if degradedMachineCount+readyMachineCount+unavailableMachineCount+updatingMachineCount != int32(len(nodes)) { + klog.Infof("new state reporting did not get all nodes, falling back. Sate reporting node total %d and actual node total %d", (degradedMachineCount + readyMachineCount + updatedMachineCount + unavailableMachineCount + updatingMachineCount), len(nodes)) + klog.Infof("degraded: %d ready: %d updated %d unavailable %d updating %d", degradedMachineCount, readyMachineCount, updatedMachineCount, unavailableMachineCount, updatingMachineCount) + updatedMachines = getUpdatedMachines(pool, nodes) + updatedMachineCount = int32(len(updatedMachines)) + + readyMachines = getReadyMachines(pool, nodes) + readyMachineCount = int32(len(readyMachines)) + + unavailableMachines = getUnavailableMachines(nodes, pool) + unavailableMachineCount = int32(len(unavailableMachines)) + + degradedMachines = getDegradedMachines(nodes) + degradedMachineCount = int32(len(degradedMachines)) + } - degradedMachines := getDegradedMachines(nodes) degradedReasons := []string{} for _, n := range degradedMachines { reason, ok := n.Annotations[daemonconsts.MachineConfigDaemonReasonAnnotationKey] @@ -77,7 +196,6 @@ func calculateStatus(cconfig *v1.ControllerConfig, pool *mcfgv1.MachineConfigPoo degradedReasons = append(degradedReasons, fmt.Sprintf("Node %s is reporting: %q", n.Name, reason)) } } - degradedMachineCount := int32(len(degradedMachines)) status := mcfgv1.MachineConfigPoolStatus{ ObservedGeneration: pool.Generation, diff --git a/pkg/controller/node/status_test.go b/pkg/controller/node/status_test.go index 949bc67a94..3ff11e3150 100644 --- a/pkg/controller/node/status_test.go +++ b/pkg/controller/node/status_test.go @@ -5,6 +5,8 @@ import ( "reflect" "testing" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/apihelpers" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" @@ -879,7 +881,7 @@ func TestCalculateStatus(t *testing.T) { Paused: test.paused, }, } - status := calculateStatus(nil, pool, test.nodes) + status := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, nil, pool, test.nodes) test.verify(status, t) }) } diff --git a/pkg/controller/render/render_controller.go b/pkg/controller/render/render_controller.go index eab1ca9887..3bb918b1c6 100644 --- a/pkg/controller/render/render_controller.go +++ b/pkg/controller/render/render_controller.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" @@ -56,6 +57,8 @@ type Controller struct { client mcfgclientset.Interface eventRecorder record.EventRecorder + kubeClient 
kubernetes.Interface + syncHandler func(mcp string) error enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) @@ -85,6 +88,7 @@ func New( ctrl := &Controller{ client: mcfgClient, + kubeClient: kubeClient, eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-rendercontroller"})), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-rendercontroller"), } @@ -435,6 +439,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return ctrl.syncFailingStatus(pool, fmt.Errorf("no MachineConfigs found matching selector %v", selector)) } + //ctrl.EmitHealthEvent(ctrl.stateControllerPod, ctrl.HealthAnnotations(name, string(v1.MCP), v1.MCCSync), corev1.EventTypeNormal, "SyncingGeneratedMCs", fmt.Sprintf("Syncing Generated MCs for Pool %s in the render controller", pool.Name)) if err := ctrl.syncGeneratedMachineConfig(pool, mcs); err != nil { return ctrl.syncFailingStatus(pool, err) } @@ -448,7 +453,10 @@ func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) erro } sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + pool.SetAnnotations(newAnnos) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { return err } return nil @@ -457,8 +465,11 @@ func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) erro func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to render configuration for pool %s: %v", pool.Name, err)) apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { - klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + pool.SetAnnotations(newAnnos) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + return err } return err } @@ -524,14 +535,20 @@ func (ctrl *Controller) syncGeneratedMachineConfig(pool *mcfgv1.MachineConfigPoo if err != nil { return err } - _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), newPool, metav1.UpdateOptions{}) - return err + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + pool.SetAnnotations(newAnnos) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + return err + } } newPool.Spec.Configuration.Name = generated.Name // TODO(walters) Use subresource or JSON patch, but the 
latter isn't supported by the unit test mocks - pool, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), newPool, metav1.UpdateOptions{}) - if err != nil { + newAnnos := make(map[string]string) + newAnnos["machineconfiguration.openshift.io/editor"] = "machine-config-controller-render" + newPool.SetAnnotations(newAnnos) + if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), newPool, metav1.UpdateOptions{}); err != nil { return err } klog.V(2).Infof("Pool %s: now targeting: %s", pool.Name, pool.Spec.Configuration.Name) diff --git a/pkg/controller/template/template_controller.go b/pkg/controller/template/template_controller.go index 8bdb0389a2..4d6b33ce6c 100644 --- a/pkg/controller/template/template_controller.go +++ b/pkg/controller/template/template_controller.go @@ -54,10 +54,9 @@ var controllerKind = mcfgv1.SchemeGroupVersion.WithKind("ControllerConfig") type Controller struct { templatesDir string - client mcfgclientset.Interface - kubeClient clientset.Interface - eventRecorder record.EventRecorder - + client mcfgclientset.Interface + kubeClient clientset.Interface + eventRecorder record.EventRecorder syncHandler func(ccKey string) error enqueueControllerConfig func(*mcfgv1.ControllerConfig) diff --git a/pkg/controller/template/template_controller_test.go b/pkg/controller/template/template_controller_test.go index f70bde536c..1e024cdbe7 100644 --- a/pkg/controller/template/template_controller_test.go +++ b/pkg/controller/template/template_controller_test.go @@ -25,9 +25,10 @@ import ( "k8s.io/client-go/tools/record" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" informers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" ) var ( diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index 7f1a42c0b0..6610ebc6f6 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -38,8 +38,16 @@ const ( // GeneratedByVersionAnnotationKey is used to tag the controllerconfig to synchronize the MCO and MCC GeneratedByVersionAnnotationKey = "machineconfiguration.openshift.io/generated-by-version" - // MachineConfigDaemonStateWorking is set by daemon when it is applying an update. + // need some additional MachineConfigDaemonStates here... or maybe we need them to be consts of a larger MachineConfigDaemonUpdateStates type + MachineConfigDaemonStateWorkPerparing = "WorkPreparing" + // MachineConfigDaemonStateWorking is set by daemon when it is beginning to apply an update. MachineConfigDaemonStateWorking = "Working" + // MachineConfigDaemonStateWorkPostAction is set by daemon when it is running post-update actions. + MachineConfigDaemonStateWorkPostAction = "WorkPostAction" + // MachineConfigDaemonStateWorkCompleting is set by daemon when it is completing an update. + MachineConfigDaemonStateWorkCompleting = "WorkCompleting" + // MachineConfigDaemonResuming is set by daemon when it is resuming normal operations after an update. + MachineConfigDaemonResuming = "Resuming" // MachineConfigDaemonStateDone is set by daemon when it is done applying an update.
MachineConfigDaemonStateDone = "Done" // MachineConfigDaemonStateDegraded is set by daemon when an error not caused by a bad MachineConfig @@ -49,6 +57,8 @@ const ( MachineConfigDaemonStateUnreconcilable = "Unreconcilable" // MachineConfigDaemonReasonAnnotationKey is set by the daemon when it needs to report a human readable reason for its state. E.g. when state flips to degraded/unreconcilable. MachineConfigDaemonReasonAnnotationKey = "machineconfiguration.openshift.io/reason" + // MachineConfigDaemonPhaseAnnotationKey is set by the daemon alongside the reason annotation key when more detail is needed + MachineConfigDaemonPhaseAnnotationKey = "machineconfiguration.openshift.io/phase" // MachineConfigDaemonFinalizeFailureAnnotationKey is set by the daemon when ostree fails to finalize MachineConfigDaemonFinalizeFailureAnnotationKey = "machineconfiguration.openshift.io/ostree-finalize-staged-failure" // InitialNodeAnnotationsFilePath defines the path at which it will find the node annotations it needs to set on the node once it comes up for the first time. diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index e06692b48d..7a9437624f 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -18,6 +18,9 @@ import ( "syscall" "time" + mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + ign3types "github.com/coreos/ignition/v2/config/v3_4/types" "github.com/google/renameio" "golang.org/x/time/rate" @@ -28,10 +31,12 @@ import ( "k8s.io/apimachinery/pkg/util/wait" coreinformersv1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/klog/v2" corev1lister "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" configv1 "github.com/openshift/api/config/v1" @@ -41,6 +46,7 @@ import ( mcoResourceRead "github.com/openshift/machine-config-operator/lib/resourceread" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/daemon/osrelease" ) @@ -76,6 +82,8 @@ type Daemon struct { // kubeClient allows interaction with Kubernetes, including the node we are running on. 
kubeClient kubernetes.Interface + mcfgClient mcfgclientset.Interface + // nodeLister is used to watch for updates via the informer nodeLister corev1lister.NodeLister nodeListerSynced cache.InformerSynced @@ -97,6 +105,8 @@ type Daemon struct { nodeWriter NodeWriter + featureGatesAccessor featuregates.FeatureGateAccess + // channel used by callbacks to signal Run() of an error exitCh chan<- error @@ -301,14 +311,21 @@ func New( func (dn *Daemon) ClusterConnect( name string, kubeClient kubernetes.Interface, + mcfgClient mcfgclientset.Interface, mcInformer mcfginformersv1.MachineConfigInformer, nodeInformer coreinformersv1.NodeInformer, ccInformer mcfginformersv1.ControllerConfigInformer, kubeletHealthzEnabled bool, kubeletHealthzEndpoint string, + featureGatesAccessor featuregates.FeatureGateAccess, ) error { dn.name = name dn.kubeClient = kubeClient + dn.mcfgClient = mcfgClient + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.V(2).Infof) + eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: kubeClient.CoreV1().Events("openshift-machine-config-operator")}) // Other controllers start out with the default controller limiter which retries // in milliseconds; since any change here will involve rebooting the node @@ -349,6 +366,8 @@ func (dn *Daemon) ClusterConnect( dn.kubeletHealthzEnabled = kubeletHealthzEnabled dn.kubeletHealthzEndpoint = kubeletHealthzEndpoint + dn.featureGatesAccessor = featureGatesAccessor + return nil } @@ -655,6 +674,18 @@ func (dn *Daemon) syncNode(key string) error { // Deep-copy otherwise we are mutating our cache. node = node.DeepCopy() + // this is going to be weird + // events need to be done on an object + /* + annos := make(map[string]string) + annos["ms"] = "DaemonState" //might need this might not + annos["state"] = "StateControllerSyncDaemon" + annos["ObjectKind"] = string(mcfgv1.Node) + annos["ObjectName"] = node.Name + + dn.EmitHealthEvent(dn.stateControllerPod, annos, corev1.EventTypeNormal, "GotNode", fmt.Sprintf("Got node %s for MCD", node.Name)) + */ + // dn.daemonHealthEvents.Eventf( if dn.node == nil { dn.node = node if err := dn.initializeNode(); err != nil { @@ -667,10 +698,10 @@ func (dn *Daemon) syncNode(key string) error { oldReason := dn.node.Annotations[constants.MachineConfigDaemonReasonAnnotationKey] newReason := node.Annotations[constants.MachineConfigDaemonReasonAnnotationKey] if oldState != newState { - klog.Infof("Transitioned from state: %v -> %v", oldState, newState) + // dn.EmitHealthEvent(dn.stateControllerPod, annos, corev1.EventTypeNormal, "UpdatingStateAnnotations", fmt.Sprintf("Updating MCO State from %s to %s", oldState, newState)) } if oldReason != newReason { - klog.Infof("Transitioned from degraded/unreconcilable reason %v -> %v", oldReason, newReason) + // dn.EmitHealthEvent(dn.stateControllerPod, annos, corev1.EventTypeNormal, "UpdatingReasonAnnotations", fmt.Sprintf("Updating MCO State Transition Reasons from %s to %s", oldReason, newReason)) } dn.node = node } @@ -720,17 +751,34 @@ func (dn *Daemon) syncNode(key string) error { } if ufc != nil { + /* + annos := map[string]string{ + constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorkPerparing, + constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigNodeUpdateCompatible), + constants.MachineConfigDaemonReasonAnnotationKey: fmt.Sprintf("Detecting diff in current: %s and desired %s MC. 
Preparing upgrade", ufc.currentConfig.Name, ufc.desiredConfig.Name), + } + dn.nodeWriter.SetAnnotations(annos) + */ // Only check for config drift if we need to update. if err := dn.runPreflightConfigDriftCheck(); err != nil { return err } + //dn.EmitHealthEvent(dn.stateControllerPod, annos, corev1.EventTypeNormal, "TriggeringUpdate", fmt.Sprintf("Updating MCO to new MachineConfig %s", ufc.desiredConfig.Name)) if err := dn.triggerUpdate(ufc.currentConfig, ufc.desiredConfig, ufc.currentImage, ufc.desiredImage); err != nil { + /* annos = map[string]string{ + constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateDegraded, + constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigNodeErrored), + constants.MachineConfigDaemonReasonAnnotationKey: err.Error(), + } + dn.nodeWriter.SetAnnotations(annos) + */ + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(v1.MachineConfigNodeErrored), corev1.EventTypeWarning, "UpdateError", fmt.Sprintf("Error Updating to new MachineConfig %s", ufc.desiredConfig.Name)) return err } } - klog.V(2).Infof("Node %s is already synced", node.Name) + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(v1.MachineConfigPoolReady), corev1.EventTypeNormal, "NodeUpdated", fmt.Sprintf("Node %s is up to date", dn.nodeName())) return nil } @@ -915,15 +963,18 @@ func (dn *Daemon) syncNodeHypershift(key string) error { return nil } // Assume an update is completed. Set node state to done. Also request an uncordon + annos := map[string]string{ constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateDone, constants.MachineConfigDaemonReasonAnnotationKey: "", - constants.CurrentMachineConfigAnnotationKey: targetHash, - constants.DesiredDrainerAnnotationKey: fmt.Sprintf("%s-%s", constants.DrainerStateUncordon, targetHash), + // constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigPoolUpdateCordoning), + constants.CurrentMachineConfigAnnotationKey: targetHash, + constants.DesiredDrainerAnnotationKey: fmt.Sprintf("%s-%s", constants.DrainerStateUncordon, targetHash), } if _, err := dn.nodeWriter.SetAnnotations(annos); err != nil { return fmt.Errorf("failed to set Done annotation on node: %w", err) } + klog.Infof("The pod has completed update. Awaiting removal.") // TODO os.Exit here return nil @@ -957,7 +1008,8 @@ func (dn *Daemon) syncNodeHypershift(key string) error { if node.Annotations[constants.DesiredDrainerAnnotationKey] != targetDrainValue { // Make a request to perform drain annos := map[string]string{ - constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + //constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigPoolUpdateDraining), constants.MachineConfigDaemonReasonAnnotationKey: "", constants.DesiredDrainerAnnotationKey: targetDrainValue, } @@ -2229,6 +2281,13 @@ func (dn *Daemon) triggerUpdateWithMachineConfig(currentConfig, desiredConfig *m // and the config will "drift" while the update is occurring. dn.stopConfigDriftMonitor() + // we need to do this but it needs to reach into the controller.... 
+ // without annotations + // dn.UpgradeProgression + //dn.setPoolHealthProgression(v1.MachineConfigNodeUpdatePrepared, "stopping config drift monitor", "") + + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(v1.MachineConfigNodeUpdatePrepared), corev1.EventTypeNormal, "StoppingConfigDriftMonitor", "Daemon is stopping the config drift monitor") + // run the update process. this function doesn't currently return. return dn.update(currentConfig, desiredConfig, skipCertificateWrite) } diff --git a/pkg/daemon/daemon_test.go b/pkg/daemon/daemon_test.go index c13f635b52..50101a8c45 100644 --- a/pkg/daemon/daemon_test.go +++ b/pkg/daemon/daemon_test.go @@ -152,11 +152,13 @@ func (f *fixture) newController() *Daemon { } d.ClusterConnect("node_name_test", f.kubeclient, + f.client, i.Machineconfiguration().V1().MachineConfigs(), k8sI.Core().V1().Nodes(), i.Machineconfiguration().V1().ControllerConfigs(), false, "", + d.featureGatesAccessor, ) d.mcListerSynced = alwaysReady diff --git a/pkg/daemon/drain.go b/pkg/daemon/drain.go index 238b02d64f..a09b5964d6 100644 --- a/pkg/daemon/drain.go +++ b/pkg/daemon/drain.go @@ -9,8 +9,10 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/image/v5/pkg/sysregistriesv2" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/upgrademonitor" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -43,6 +45,18 @@ func (dn *Daemon) performDrain() error { return err } desiredDrainAnnotationValue := fmt.Sprintf("%s-%s", "drain", desiredConfigName) + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateExecuted, Reason: string(mcfgalphav1.MachineConfigNodeUpdateDrained), Message: fmt.Sprintf("Draining Node as part of In progress upgrade phase")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateDrained, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateExecuted), string(mcfgalphav1.MachineConfigNodeUpdateDrained)), Message: fmt.Sprintf("Draining node. The drain will not be complete until desired drainer %s matches current drainer %s", desiredDrainAnnotationValue, dn.node.Annotations[constants.LastAppliedDrainerAnnotationKey])}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } if dn.node.Annotations[constants.LastAppliedDrainerAnnotationKey] == desiredDrainAnnotationValue { // We should only enter this in one of three scenarios: // A previous drain timed out, and the controller succeeded while we waited for the next sync @@ -85,12 +99,31 @@ func (dn *Daemon) performDrain() error { if wait.Interrupted(err) { failMsg := fmt.Sprintf("failed to drain node: %s after 1 hour. 
Please see machine-config-controller logs for more information", dn.node.Name) dn.nodeWriter.Eventf(corev1.EventTypeWarning, "FailedToDrain", failMsg) + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateExecuted, Reason: string(mcfgalphav1.MachineConfigNodeUpdateDrained), Message: fmt.Sprintf("Node Drain has failed")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateDrained, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateExecuted), string(mcfgalphav1.MachineConfigNodeUpdateDrained)), Message: fmt.Sprintf("Node Drain has failed. Error is: %s The drain will not be complete until desired drainer %s matches current drainer %s", failMsg, desiredDrainAnnotationValue, dn.node.Annotations[constants.LastAppliedDrainerAnnotationKey])}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) return fmt.Errorf(failMsg) + } return fmt.Errorf("Something went wrong while attempting to drain node: %v", err) } logSystem("drain complete") + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateExecuted, Reason: string(mcfgalphav1.MachineConfigNodeUpdateDrained), Message: fmt.Sprintf("Draining Node as part of In progress upgrade phase")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateDrained, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateExecuted), string(mcfgalphav1.MachineConfigNodeUpdateDrained)), Message: fmt.Sprintf("Draining node. The drain will not be complete until desired drainer %s matches current drainer %s", desiredDrainAnnotationValue, dn.node.Annotations[constants.LastAppliedDrainerAnnotationKey])}, + metav1.ConditionUnknown, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) t := time.Since(startTime).Seconds() klog.Infof("Successful drain took %v seconds", t) diff --git a/pkg/daemon/update.go b/pkg/daemon/update.go index 5fd5441861..919809c597 100644 --- a/pkg/daemon/update.go +++ b/pkg/daemon/update.go @@ -27,10 +27,13 @@ import ( "k8s.io/klog/v2" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon/constants" pivottypes "github.com/openshift/machine-config-operator/pkg/daemon/pivot/types" pivotutils "github.com/openshift/machine-config-operator/pkg/daemon/pivot/utils" + "github.com/openshift/machine-config-operator/pkg/upgrademonitor" ) const ( @@ -75,6 +78,19 @@ func reloadService(name string) error { // If at any point an error occurs, we reboot the node so that node has correct configuration. 
func (dn *Daemon) performPostConfigChangeAction(postConfigChangeActions []string, configName string) error { if ctrlcommon.InSlice(postConfigChangeActionReboot, postConfigChangeActions) { + err := upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, Reason: string(mcfgalphav1.MachineConfigNodeUpdateRebooted), Message: fmt.Sprintf("Node will reboot into config %s", configName)}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateRebooted, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateRebooted), string(mcfgalphav1.MachineConfigNodeUpdateRebooted)), Message: "Upgrade requires a reboot. Currently doing this as the post update action."}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdatePostActionComplete), corev1.EventTypeNormal, "RebootingNode", fmt.Sprintf("Rebooting node %s during upgrade", dn.nodeName())) logSystem("Rebooting node") return dn.reboot(fmt.Sprintf("Node will reboot into config %s", configName)) } @@ -88,7 +104,28 @@ func (dn *Daemon) performPostConfigChangeAction(postConfigChangeActions []string if ctrlcommon.InSlice(postConfigChangeActionReloadCrio, postConfigChangeActions) { serviceName := "crio" - + //annos := map[string]string{ + //constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorkPostAction, + //constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.r), + //constants.MachineConfigDaemonReasonAnnotationKey: "Reloading CRIO, upgrade does not require reboot.", + //} + //_, err := dn.nodeWriter.SetAnnotations(annos) + //if err != nil { + // return err + //} + err := upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, Reason: string(mcfgalphav1.MachineConfigNodeUpdateReloaded), Message: "Node will reload CRIO"}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateReloaded, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdatePostActionComplete), string(mcfgalphav1.MachineConfigNodeUpdateReloaded)), Message: "Upgrade requires a CRIO reload. Currently doing this as the post update action."}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdatePostActionComplete), corev1.EventTypeNormal, "RealoadingCRIO", fmt.Sprintf("Realoding CRIO on node %s as part of upgrade", dn.nodeName())) if err := reloadService(serviceName); err != nil { if dn.nodeWriter != nil { dn.nodeWriter.Eventf(corev1.EventTypeWarning, "FailedServiceReload", fmt.Sprintf("Reloading %s service failed. Error: %v", serviceName, err)) @@ -96,6 +133,19 @@ func (dn *Daemon) performPostConfigChangeAction(postConfigChangeActions []string return fmt.Errorf("could not apply update: reloading %s configuration failed. 
Error: %w", serviceName, err) } + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, Reason: string(mcfgalphav1.MachineConfigNodeUpdateReloaded), Message: "Node has reloaded CRIO"}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateReloaded, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdatePostActionComplete), string(mcfgalphav1.MachineConfigNodeUpdateReloaded)), Message: "Upgrade required a CRIO reload. Completed this as the post update action."}, + metav1.ConditionTrue, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + if dn.nodeWriter != nil { dn.nodeWriter.Eventf(corev1.EventTypeNormal, "SkipReboot", "Config changes do not require reboot. Service %s was reloaded.", serviceName) } @@ -121,6 +171,28 @@ func (dn *Daemon) performPostConfigChangeAction(postConfigChangeActions []string } if inDesiredConfig { + //annos := map[string]string{ + //constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonResuming, + //constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigNodeResumed), + //constants.MachineConfigDaemonReasonAnnotationKey: fmt.Sprintf("In desired config %s. Resuming normal operations.", state.currentConfig.Name), + //} + //_, err := dn.nodeWriter.SetAnnotations(annos) + //if err != nil { + // return err + //} + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeResumed, Reason: string(mcfgalphav1.MachineConfigNodeResumed), Message: fmt.Sprintf("In desired config %s. Resuming normal operations.", state.currentConfig.Name)}, + nil, + metav1.ConditionFalse, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateComplete), corev1.EventTypeNormal, "ResumingConfigDrift", fmt.Sprintf("Resuming ConfigDrift Monitor, signaling the completion of update on node %s", dn.nodeName())) // (re)start the config drift monitor since rebooting isn't needed.
dn.startConfigDriftMonitor() return nil @@ -405,7 +477,7 @@ func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfi return err } if state != constants.MachineConfigDaemonStateDegraded && state != constants.MachineConfigDaemonStateUnreconcilable { - if err := dn.nodeWriter.SetWorking(); err != nil { + if err := dn.nodeWriter.SetWorking("Upgrading Image", string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS)); err != nil { return fmt.Errorf("error setting node's state to Working: %w", err) } } @@ -571,7 +643,7 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi return err } if state != constants.MachineConfigDaemonStateDegraded && state != constants.MachineConfigDaemonStateUnreconcilable { - if err := dn.nodeWriter.SetWorking(); err != nil { + if err := dn.nodeWriter.SetWorking(fmt.Sprintf("Machineconfigs %s and %s", oldConfig.Name, newConfig.Name), string(mcfgalphav1.MachineConfigNodeUpdateCompatible)); err != nil { return fmt.Errorf("error setting node's state to Working: %w", err) } } @@ -598,6 +670,20 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi klog.Infof("Checking Reconcilable for config %v to %v", oldConfigName, newConfigName) + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePrepared, Reason: string(mcfgalphav1.MachineConfigNodeUpdateCompatible), Message: fmt.Sprintf("comparing MachineConfigs as part of the preparation phase")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateCompatible, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdatePrepared), string(mcfgalphav1.MachineConfigNodeUpdateCompatible)), Message: fmt.Sprintf("Checking Reconcilable for config %v to %v", oldConfigName, newConfigName)}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + // checking for reconcilability + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdatePrepared), corev1.EventTypeNormal, "ReconcilingConfigs", fmt.Sprintf("Checking if %s and %s are reconcilable", oldConfig.Name, newConfig.Name)) // make sure we can actually reconcile this state diff, reconcilableError := reconcilable(oldConfig, newConfig) @@ -622,7 +708,29 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi if err != nil { return err } + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePrepared, Reason: string(mcfgalphav1.MachineConfigNodeUpdateCompatible), Message: "Update is Compatible."}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateCompatible, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdatePrepared), string(mcfgalphav1.MachineConfigNodeUpdateCompatible)), Message: fmt.Sprintf("Update Compatible. 
Post Cfg Actions %v: Drain Required: %t", actions, drain)}, + metav1.ConditionTrue, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } if drain { + // annos := map[string]string{ + // constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + // constants.MachineConfigDaemonReasonAnnotationKey: "DrainingNode", + //} + // _, err = dn.nodeWriter.SetAnnotations(annos) + // if err != nil { + // return err + // } + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateExecuted), corev1.EventTypeNormal, "DrainingNode", fmt.Sprintf("Draining Node %s", dn.nodeName())) + if err := dn.performDrain(); err != nil { return err } @@ -630,6 +738,43 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi klog.Info("Changes do not require drain, skipping.") } + files := "" + for _, f := range newIgnConfig.Storage.Files { + files += f.Path + " " + } + + osUpdate := "will not" + sshKeyUpdate := "will not" + if diff.passwd { + sshKeyUpdate = "will" + } + if diff.osUpdate || diff.extensions || diff.kernelType { + osUpdate = "will" + } + + // annos := map[string]string{ + // constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + // constants.MachineConfigDaemonPhaseAnnotationKey: string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS), + // constants.MachineConfigDaemonReasonAnnotationKey: fmt.Sprintf("Applying files to node. Using new Ignition files: %s", files), + //} + //_, err = dn.nodeWriter.SetAnnotations(annos) + //if err != nil { + // return err + //} + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateExecuted, Reason: string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS), Message: fmt.Sprintf("Updating the Files and OS on disk as a part of the in progress phase")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateFilesAndOS, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateExecuted), string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS)), Message: fmt.Sprintf("Applying files and new OS config to node. OS %s need an update. 
SSH Keys %s need an update", osUpdate, sshKeyUpdate)}, + metav1.ConditionUnknown, + metav1.ConditionUnknown, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateExecuted), corev1.EventTypeNormal, "UpdatingFiles", fmt.Sprintf("Updating Files on Disk on node %s", dn.nodeName())) + // update files on disk that need updating if err := dn.updateFiles(oldIgnConfig, newIgnConfig, skipCertificateWrite); err != nil { return err @@ -683,6 +828,8 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi } }() + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdateExecuted), corev1.EventTypeNormal, "ApplyngOSChanges", fmt.Sprintf("Applying OS Changes on node %s", dn.nodeName())) + if dn.os.IsCoreOSVariant() { coreOSDaemon := CoreOSDaemon{dn} if err := coreOSDaemon.applyOSChanges(*diff, oldConfig, newConfig); err != nil { @@ -729,6 +876,19 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi } }() + err = upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateExecuted, Reason: string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS), Message: fmt.Sprintf("Updated the Files and OS on disk as a part of the in progress phase")}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateFilesAndOS, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateExecuted), string(mcfgalphav1.MachineConfigNodeUpdateFilesAndOS)), Message: fmt.Sprintf("Applied files and new OS config to node. OS %s needed an update. SSH Keys %s needed an update", osUpdate, sshKeyUpdate)}, + metav1.ConditionUnknown, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) + if err != nil { + return err + } + return dn.performPostConfigChangeAction(actions, newConfig.GetName()) } @@ -2172,6 +2332,15 @@ func (dn *Daemon) cancelSIGTERM() { // on failure to reboot, it throws an error and waits for the operator to try again func (dn *Daemon) reboot(rationale string) error { // Now that everything is done, avoid delaying shutdown. + //annos := map[string]string{ + // constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorkPostAction, + // constants.MachineConfigDaemonReasonAnnotationKey: "ClosingDaemon", + //} + //_, err := dn.nodeWriter.SetAnnotations(annos) + //if err != nil { + // return err + //} + //dn.EmitUpgradeEvent(dn.stateControllerPod, dn.UpgradeAnnotations(mcfgv1.MachineConfigNodeUpdatePostActionComplete), corev1.EventTypeNormal, "ClosingDaemon", "Closing Daemon") dn.cancelSIGTERM() dn.Close() @@ -2200,7 +2369,15 @@ func (dn *Daemon) reboot(rationale string) error { // and we wait for GracefulNodeShutdown dn.rebootQueued = true logSystem("reboot successful") - return nil + return upgrademonitor.GenerateAndApplyMachineConfigNodes( + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, Reason: string(mcfgalphav1.MachineConfigNodeUpdateRebooted), Message: "Node has rebooted"}, + &upgrademonitor.Condition{State: mcfgalphav1.MachineConfigNodeUpdateRebooted, Reason: fmt.Sprintf("%s%s", string(mcfgalphav1.MachineConfigNodeUpdateRebooted), string(mcfgalphav1.MachineConfigNodeUpdateRebooted)), Message: "Upgrade required a reboot. 
Completed this as the post update action."}, + metav1.ConditionTrue, + metav1.ConditionTrue, + dn.node, + dn.mcfgClient, + dn.featureGatesAccessor, + ) } func (dn *CoreOSDaemon) applyLayeredOSChanges(mcDiff machineConfigDiff, oldConfig, newConfig *mcfgv1.MachineConfig) (retErr error) { diff --git a/pkg/daemon/upgrade_monitor_test.go b/pkg/daemon/upgrade_monitor_test.go new file mode 100644 index 0000000000..b246b1e995 --- /dev/null +++ b/pkg/daemon/upgrade_monitor_test.go @@ -0,0 +1,163 @@ +package daemon + +import ( + "context" + "testing" + + apicfgv1 "github.com/openshift/api/config/v1" + "github.com/openshift/machine-config-operator/pkg/upgrademonitor" + + "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" + informers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + k8sfake "k8s.io/client-go/kubernetes/fake" +) + +type upgradeMonitorTestCase struct { + name string + err bool + parentCondition *upgrademonitor.Condition + childCondition *upgrademonitor.Condition + parentStatus metav1.ConditionStatus + childStatus metav1.ConditionStatus + expectedConditions []metav1.Condition +} + +func TestUpgradeMonitor(t *testing.T) { + testCases := []upgradeMonitorTestCase{ + { + name: "testUpdated", + err: false, + parentCondition: &upgrademonitor.Condition{ + State: v1alpha1.MachineConfigNodeUpdated, + Reason: "Updated", + Message: "Node Updated", + }, + childCondition: nil, + parentStatus: metav1.ConditionTrue, + childStatus: metav1.ConditionFalse, + expectedConditions: []metav1.Condition{ + { + Type: string(v1alpha1.MachineConfigNodeUpdated), + Message: "Node Updated", + Reason: "Updated", + LastTransitionTime: metav1.Now(), + Status: metav1.ConditionTrue, + }, + }, + }, + { + name: "testUpdating", + err: false, + parentCondition: &upgrademonitor.Condition{ + State: v1alpha1.MachineConfigNodeUpdateExecuted, + Reason: "Updating", + Message: "Node Updating", + }, + childCondition: &upgrademonitor.Condition{ + State: v1alpha1.MachineConfigNodeUpdateFilesAndOS, + Reason: "FilesAndOS", + Message: "Applied Files and OS", + }, + parentStatus: metav1.ConditionUnknown, + childStatus: metav1.ConditionTrue, + expectedConditions: []metav1.Condition{ + { + Type: string(v1alpha1.MachineConfigNodeUpdateExecuted), + Message: "Node Updating", + Reason: "Updating", + LastTransitionTime: metav1.Now(), + Status: metav1.ConditionUnknown, + }, + { + Type: string(v1alpha1.MachineConfigNodeUpdateFilesAndOS), + Message: "Applied new Files and OS", + Reason: "FilesAndOS", + LastTransitionTime: metav1.Now(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + for _, testCase := range testCases { + // Wire up the mutex to each test case before executing so they don't stomp + // on each other. + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + testCase.run(t) + }) + } +} + +// Runs the test case +func (tc upgradeMonitorTestCase) run(t *testing.T) { + stopCh := make(chan struct{}) + defer close(stopCh) + f := &fixture{} + f.t = t + f.objects = []runtime.Object{} + f.kubeobjects = []runtime.Object{} + f.client = fake.NewSimpleClientset(f.objects...) + f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
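// GenerateAndApplyMachineConfigNodes is a no-op when the MachineConfigNodes feature
// gate is disabled, so the fixture enables that gate through library-go's hardcoded
// feature gate accessor before wiring up the daemon under test.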
+ fgAccess := featuregates.NewHardcodedFeatureGateAccess( + []apicfgv1.FeatureGateName{ + apicfgv1.FeatureGateMachineConfigNodes, + }, + []apicfgv1.FeatureGateName{ + apicfgv1.FeatureGateExternalCloudProvider, + apicfgv1.FeatureGateExternalCloudProviderAzure, + apicfgv1.FeatureGateExternalCloudProviderGCP, + apicfgv1.FeatureGateExternalCloudProviderExternal, + }, + ) + + i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) + k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) + + i.Start(stopCh) + i.WaitForCacheSync(stopCh) + k8sI.Start(stopCh) + k8sI.WaitForCacheSync(stopCh) + + d, err := New(nil) + if err != nil { + f.t.Fatalf("can't bring up daemon: %v", err) + } + d.ClusterConnect("node_name_test", + f.kubeclient, + f.client, + i.Machineconfiguration().V1().MachineConfigs(), + k8sI.Core().V1().Nodes(), + i.Machineconfiguration().V1().ControllerConfigs(), + false, + "", + fgAccess, + ) + + err = upgrademonitor.GenerateAndApplyMachineConfigNodes(tc.parentCondition, tc.childCondition, tc.parentStatus, tc.childStatus, d.node, d.mcfgClient, d.featureGatesAccessor) + if err != nil { + f.t.Fatalf("Could not generate and apply MCN %v", err) + } + + n, err := d.mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.TODO(), "node_name_test", metav1.GetOptions{}) + if err != nil { + f.t.Fatalf("can't bring up daemon: %v", err) + } + + for _, expectedCond := range tc.expectedConditions { + for _, cond := range n.Status.Conditions { + if cond.Type == expectedCond.Type { + if cond.Status != expectedCond.Status { + f.t.Fatalf("Conditions do not match %s an %s", string(cond.Status), string(expectedCond.Status)) + } + } + } + } + +} diff --git a/pkg/daemon/writer.go b/pkg/daemon/writer.go index 16c368c1a7..008e2651db 100644 --- a/pkg/daemon/writer.go +++ b/pkg/daemon/writer.go @@ -58,7 +58,7 @@ type clusterNodeWriter struct { type NodeWriter interface { Run(stop <-chan struct{}) SetDone(*stateAndConfigs) error - SetWorking() error + SetWorking(string, string) error SetUnreconcilable(err error) error SetDegraded(err error) error SetAnnotations(annos map[string]string) (*corev1.Node, error) @@ -183,9 +183,11 @@ func (nw *clusterNodeWriter) SetDone(state *stateAndConfigs) error { } // SetWorking sets the state to Working. 
-func (nw *clusterNodeWriter) SetWorking() error { +func (nw *clusterNodeWriter) SetWorking(reason string, phase string) error { annos := map[string]string{ - constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + constants.MachineConfigDaemonStateAnnotationKey: constants.MachineConfigDaemonStateWorking, + constants.MachineConfigDaemonReasonAnnotationKey: reason, + constants.MachineConfigDaemonPhaseAnnotationKey: phase, } UpdateStateMetric(mcdState, constants.MachineConfigDaemonStateWorking, "") respChan := make(chan response, 1) diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 1a29d86d00..5ac83377f2 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -5,6 +5,12 @@ import ( "fmt" "time" + v1 "github.com/openshift/api/config/v1" + opv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + opv1clientset "github.com/openshift/client-go/operator/clientset/versioned" "k8s.io/klog/v2" configclientset "github.com/openshift/client-go/config/clientset/versioned" @@ -38,7 +44,9 @@ import ( mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfginformersalphav1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersalphav1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" ) const ( @@ -63,8 +71,11 @@ type Operator struct { vStore *versionStore + operatorHealthEvents record.EventRecorder + client mcfgclientset.Interface kubeClient kubernetes.Interface + opv1Client opv1clientset.Interface apiExtClient apiextclientset.Interface configClient configclientset.Interface eventRecorder record.EventRecorder @@ -75,6 +86,7 @@ type Operator struct { imgLister configlistersv1.ImageLister crdLister apiextlistersv1.CustomResourceDefinitionLister mcpLister mcfglistersv1.MachineConfigPoolLister + msLister mcfglistersalphav1.MachineConfigNodeLister ccLister mcfglistersv1.ControllerConfigLister mcLister mcfglistersv1.MachineConfigLister deployLister appslisterv1.DeploymentLister @@ -122,6 +134,8 @@ type Operator struct { stopCh <-chan struct{} renderConfig *renderConfig + + fgAccessor featuregates.FeatureGateAccess } // New returns a new machine config operator. 
@@ -146,6 +160,7 @@ func New( kubeClient kubernetes.Interface, apiExtClient apiextclientset.Interface, configClient configclientset.Interface, + opv1Client opv1clientset.Interface, oseKubeAPIInformer coreinformersv1.ConfigMapInformer, nodeInformer coreinformersv1.NodeInformer, maoSecretInformer coreinformersv1.SecretInformer, @@ -154,6 +169,8 @@ func New( mcoSecretInformer coreinformersv1.SecretInformer, ocSecretInformer coreinformersv1.SecretInformer, mcoCOInformer configinformersv1.ClusterOperatorInformer, + msInformer mcfginformersalphav1.MachineConfigNodeInformer, + fgAccess featuregates.FeatureGateAccess, ) *Operator { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) @@ -165,6 +182,7 @@ func New( imagesFile: imagesFile, vStore: newVersionStore(), client: client, + opv1Client: opv1Client, kubeClient: kubeClient, apiExtClient: apiExtClient, configClient: configClient, @@ -175,7 +193,17 @@ func New( Namespace: ctrlcommon.MCONamespace, APIVersion: "apps/v1", }), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigoperator"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigoperator"), + fgAccessor: fgAccess, + } + + err := corev1.AddToScheme(scheme.Scheme) + if err != nil { + klog.Errorf("Could not modify scheme: %w", err) + } + err = opv1.AddToScheme(scheme.Scheme) + if err != nil { + klog.Errorf("Could not modify scheme: %w", err) } for _, i := range []cache.SharedIndexInformer{ @@ -402,5 +430,25 @@ func (optr *Operator) sync(key string) error { // this check must always run last since it makes sure the pools are in sync/upgrading correctly {"RequiredPools", optr.syncRequiredMachineConfigPools}, } + + // if FG is enabled, put syncMCN in there + fg, err := optr.fgAccessor.CurrentFeatureGates() + if err != nil { + klog.Errorf("Could not get fg: %w", err) + return err + } + if fg.Enabled(v1.FeatureGateMachineConfigNodes) { + syncFuncs = append(syncFuncs[:1], append([]syncFunc{{"MachineConfigNode", optr.syncMachineConfigNodes}}, syncFuncs[1:]...)...) 
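// The nested append splices the new entry in at index 1: the first sync function keeps
// its slot, {"MachineConfigNode", optr.syncMachineConfigNodes} runs right after it, and
// the remaining sync functions (including RequiredPools, which must stay last) keep
// their original order.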
+ } return optr.syncAll(syncFuncs) } + +func (op *Operator) HealthAnnotations(object string, objectType string, kind mcfgalphav1.StateProgress) map[string]string { + annos := make(map[string]string) + annos["ms"] = "OperatorHealth" //might need this might not + annos["state"] = string(kind) + annos["ObjectName"] = object + annos["ObjectKind"] = objectType + + return annos +} diff --git a/pkg/operator/status.go b/pkg/operator/status.go index 12a7e646c2..01f1155924 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -8,6 +8,8 @@ import ( "strings" "time" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + configv1 "github.com/openshift/api/config/v1" cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" corev1 "k8s.io/api/core/v1" @@ -18,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" v1 "github.com/openshift/api/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/apihelpers" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -303,9 +304,40 @@ func (optr *Operator) syncUpgradeableStatus() error { var updating, degraded bool for _, pool := range pools { // collect updating status but continue to check each pool to see if any pool is degraded + //updateInProg, err := state.IsUpgradingProgressionTrue(mcfgalphav1.MachineConfigNodeUpdateExecuted, *pool, optr.msLister, optr.apiExtClient) if isPoolStatusConditionTrue(pool, mcfgv1.MachineConfigPoolUpdating) { updating = true } + if err != nil { + klog.Errorf("error on Upgrading Progression %w", err) + return err + } + /* + updatePrep, err := state.IsUpgradingProgressionTrue(mcfgalphav1.MachineConfigNodeUpdatePrepared, *pool, optr.msLister, optr.apiExtClient) + if err != nil { + klog.Errorf("error on Upgrading Progression %w", err) + return err + } + + updateFinishing, err := state.IsUpgradingProgressionTrue(mcfgalphav1.MachineConfigNodeUpdateComplete, *pool, optr.msLister, optr.apiExtClient) + if err != nil { + klog.Errorf("error on Upgrading Progression %w", err) + return err + } + + updatePostAction, err := state.IsUpgradingProgressionTrue(mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, *pool, optr.msLister, optr.apiExtClient) + if err != nil { + klog.Errorf("error on Upgrading Progression %w", err) + return err + } + + updating = updatePrep || updateInProg || updateFinishing || updatePostAction + */ + //degraded, err = state.IsUpgradingProgressionTrue(mcfgalphav1.MachineConfigNodeErrored, *pool, optr.msLister, optr.apiExtClient) + //if err != nil { + // klog.Errorf("error on Upgrading Progression %w", err) + // return err + //} degraded = isPoolStatusConditionTrue(pool, mcfgv1.MachineConfigPoolDegraded) // degraded should get top billing in the clusteroperator status, if we find this, set it and update if degraded { @@ -315,6 +347,7 @@ func (optr *Operator) syncUpgradeableStatus() error { break } } + // this should no longer trigger when adding a node to a pool. 
It should only trigger if the node actually has to go through an upgrade // updating and degraded can occur together, in that case defer to the degraded Reason that is already set above if updating && !degraded { coStatus.Status = configv1.ConditionFalse diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 7f4c85982a..f25e84c015 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -35,6 +35,8 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" v1 "github.com/openshift/api/machineconfiguration/v1" + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" "github.com/openshift/library-go/pkg/operator/resource/resourceread" mcoResourceApply "github.com/openshift/machine-config-operator/lib/resourceapply" @@ -281,6 +283,7 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig) error { } imgRegistryUsrData := []v1.ImageRegistryBundle{} if cfg.Spec.AdditionalTrustedCA.Name != "" { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(cfg.Spec.AdditionalTrustedCA.Name, "ConfigMap", v1.OperatorSyncRenderConfig), corev1.EventTypeNormal, "GetAdditionalTrustedCA", "Syncing AdditionalTrustedCA while generating render config") cm, err := optr.clusterCmLister.ConfigMaps("openshift-config").Get(cfg.Spec.AdditionalTrustedCA.Name) if err != nil { klog.Warningf("could not find configmap specified in image.config.openshift.io/cluster with the name %s", cfg.Spec.AdditionalTrustedCA.Name) @@ -313,6 +316,7 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig) error { imgRegistryData := []v1.ImageRegistryBundle{} cm, err := optr.clusterCmLister.ConfigMaps("openshift-config-managed").Get("image-registry-ca") if err == nil { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(cm.Name, "ConfigMap", v1.OperatorSyncRenderConfig), corev1.EventTypeNormal, "GetImageRegistryCA", "Syncing ImageRegistryCA while generating render config") newKeys := sets.StringKeySet(cm.Data).List() newBinaryKeys := sets.StringKeySet(cm.BinaryData).List() for _, key := range newKeys { @@ -343,6 +347,8 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig) error { caData[CA.File] = string(CA.Data) } + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("merged-trusted-image-registry-ca", "ConfigMap", v1.OperatorSyncRenderConfig), corev1.EventTypeNormal, "MergedTrustedCA", "Syncing MergedTrustedImageRegistryCA while generating render config") + cm, err = optr.clusterCmLister.ConfigMaps("openshift-config-managed").Get("merged-trusted-image-registry-ca") if err != nil && !errors.IsNotFound(err) { return err @@ -544,6 +550,8 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig) error { } spec.AdditionalTrustBundle = trustBundle + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("machine-config-controller", "ControllerConfig", v1.OperatorSyncRenderConfig), corev1.EventTypeNormal, "GenerateCConfig", "Syncing ControllerConfig spec while generating render config") + if err := optr.syncCloudConfig(spec, infra); err != nil { return err } @@ -619,6 +627,8 @@ func (optr *Operator) syncCustomResourceDefinitions() error { } for _, crd := range crds { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(crd, "CRD", v1.OperatorSyncRenderConfig), corev1.EventTypeNormal, "SyncCRDs", "Syncing CRDs while generating render config") + crdBytes, err := manifests.ReadFile(crd) if 
err != nil { return fmt.Errorf("error getting asset %s: %w", crd, err) @@ -644,6 +654,7 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error { "manifests/worker.machineconfigpool.yaml", } for _, mcp := range mcps { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(mcp, "MCP", v1.OperatorSyncMCP), corev1.EventTypeNormal, "syncMCP", "Syncing MCPs while generating syncingMachineConfigPools") mcpBytes, err := renderAsset(config, mcp) if err != nil { return err @@ -662,6 +673,8 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error { } // base64.StdEncoding.EncodeToString for _, pool := range pools { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(pool.Name, "MCP", v1.OperatorSyncMCP), corev1.EventTypeNormal, "createPointerConfig", "creating pointerConfig for pool while syncingMachineConfigPools") + pointerConfigAsset := newAssetRenderer("pointer-config") pointerConfigAsset.templateData = config.PointerConfig pointerConfigData, err := pointerConfigAsset.render(struct{ Role string }{pool.Name}) @@ -691,6 +704,56 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error { return nil } +// we need to mimic this +func (optr *Operator) syncMachineConfigNodes(config *renderConfig) error { + nodes, err := optr.nodeLister.List(labels.Everything()) + if err != nil { + return err + } + for _, node := range nodes { + var pool string + var ok bool + if pool, ok = node.Labels["node-role.kubernetes.io/worker"]; ok { + pool = "worker" + } else if pool, ok = node.Labels["node-role.kubernetes.io/master"]; ok { + pool = "master" + } + klog.Infof("Applying MachineConfigNode for node %s", node.Name) + newMCS := &v1alpha1.MachineConfigNode{ + Spec: v1alpha1.MachineConfigNodeSpec{ + Node: v1alpha1.MCOObjectReference{ + Name: string(node.Name), + }, + Pool: v1alpha1.MCOObjectReference{ + Name: string(pool), + }, + ConfigVersion: v1alpha1.MachineConfigNodeSpecMachineConfigVersion{ + Desired: "NotYetSet", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "MachineConfigNode", + APIVersion: "machineconfiguration.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + }, + } + mcsBytes, err := json.Marshal(newMCS) + if err != nil { + klog.Errorf("error rendering asset for MachineConfigNode %w", err) + return err + } + klog.Infof("state to be applied: %s", string(mcsBytes)) + p := mcoResourceRead.ReadMachineConfigNodeV1OrDie(mcsBytes) + _, _, err = mcoResourceApply.ApplyMachineConfigNode(optr.client.MachineconfigurationV1alpha1(), p) + if err != nil { + return err + } + } + return nil +} + func (optr *Operator) applyManifests(config *renderConfig, paths manifestPaths) error { for _, path := range paths.clusterRoles { crBytes, err := renderAsset(config, path) @@ -826,6 +889,7 @@ func (optr *Operator) syncControllerConfig(config *renderConfig) error { if err != nil { return err } + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("manifests/machineconfigcontroller/controllerconfig.yaml", "Manifest", v1.OperatorSyncMCC), corev1.EventTypeNormal, "applyControllerConfig", "Applying ControllerConfig while updating MCC") cc := mcoResourceRead.ReadControllerConfigV1OrDie(ccBytes) // Propagate our binary version into the controller config to help // suppress rendered config generation until a corresponding @@ -871,10 +935,12 @@ func (optr *Operator) syncMachineConfigController(config *renderConfig) error { mopServiceAccountManifestPath, }, } + // 
optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("machine-config-controller", "Manifest", v1.OperatorSyncMCC), corev1.EventTypeNormal, "syncManifests", "Syncing Manifests while updating MCC") if err := optr.applyManifests(config, paths); err != nil { return fmt.Errorf("failed to apply machine config controller manifests: %w", err) } + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("manifests/machineconfigcontroller/deployment.yaml", "Deployment", v1.OperatorSyncMCC), corev1.EventTypeNormal, "ApplyDeployment", "Applying deployment for MCC") mccBytes, err := renderAsset(config, "manifests/machineconfigcontroller/deployment.yaml") if err != nil { return err @@ -1096,6 +1162,7 @@ func (optr *Operator) syncMachineConfigDaemon(config *renderConfig) error { mcdKubeRbacProxyConfigMapPath, }, } + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("machine-config-daemon", "Deployment", v1.OperatorSyncMCD), corev1.EventTypeNormal, "syncManifests", "Applying manifests for MCD") if err := optr.applyManifests(config, paths); err != nil { return fmt.Errorf("failed to apply machine config daemon manifests: %w", err) @@ -1123,6 +1190,8 @@ func (optr *Operator) syncMachineConfigServer(config *renderConfig) error { }, daemonset: mcsDaemonsetManifestPath, } + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations("machine-config-server", "syncManifests", v1.OperatorSyncMCS), corev1.EventTypeNormal, "ApplyDeployment", "Syncing manifests for MCs") + if err := optr.applyManifests(config, paths); err != nil { return fmt.Errorf("failed to apply machine config server manifests: %w", err) } @@ -1145,6 +1214,8 @@ func (optr *Operator) syncRequiredMachineConfigPools(_ *renderConfig) error { requiredMachineCount := 0 for _, pool := range pools { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(pool.Name, "MCP", v1.OperatorSyncMCPRequired), corev1.EventTypeNormal, "requiredMCPs", fmt.Sprintf("Adding MCP to Required MCPs. Total is %d", requiredMachineCount)) + klog.Infof("Pool!!! %s", pool.Name) _, hasRequiredPoolLabel := pool.Labels[requiredForUpgradeMachineConfigPoolLabelKey] if hasRequiredPoolLabel { requiredMachineCount += int(pool.Status.MachineCount) @@ -1182,6 +1253,7 @@ func (optr *Operator) syncRequiredMachineConfigPools(_ *renderConfig) error { return false, nil } for _, pool := range pools { + // optr.EmitHealthEvent(optr.stateControllerPod, optr.HealthAnnotations(pool.Name, "MCP", v1.OperatorSyncMCPRequired), corev1.EventTypeNormal, "syncingRequiredMCP", fmt.Sprintf("syncing required MCP inside of wait loop")) degraded := isPoolStatusConditionTrue(pool, mcfgv1.MachineConfigPoolDegraded) if degraded { lastErr = fmt.Errorf("error MachineConfigPool %s is not ready, retrying. 
Status: (pool degraded: %v total: %d, ready %d, updated: %d, unavailable: %d)", pool.Name, degraded, pool.Status.MachineCount, pool.Status.ReadyMachineCount, pool.Status.UpdatedMachineCount, pool.Status.UnavailableMachineCount) diff --git a/pkg/upgrademonitor/upgrade_monitor.go b/pkg/upgrademonitor/upgrade_monitor.go new file mode 100644 index 0000000000..b3ed08f387 --- /dev/null +++ b/pkg/upgrademonitor/upgrade_monitor.go @@ -0,0 +1,196 @@ +package upgrademonitor + +import ( + "context" + "encoding/json" + "fmt" + + machineconfigurationalphav1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + + v1 "github.com/openshift/api/config/v1" + mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +type Condition struct { + State mcfgalphav1.StateProgress + Reason string + Message string +} + +func GenerateAndApplyMachineConfigNodes(ParentCondition *Condition, ChildCondition *Condition, ParentStatus metav1.ConditionStatus, ChildStatus metav1.ConditionStatus, node *corev1.Node, mcfgClient mcfgclientset.Interface, fgAccessor featuregates.FeatureGateAccess) error { + klog.Info("GEN and APPLYING MCN") + klog.Infof("Node for MCN upgrade: %s", node.Name) + fg, err := fgAccessor.CurrentFeatureGates() + if err != nil { + klog.Errorf("Could not get fg: %w", err) + return err + } + if !fg.Enabled(v1.FeatureGateMachineConfigNodes) { + return nil + } + + needNewMCNode := false + mcNode, err := mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + if mcNode.Name == "" || (err != nil && apierrors.IsNotFound(err)) { + needNewMCNode = true + } + newMCNode := mcNode.DeepCopy() + newParentCondition := metav1.Condition{ + Type: string(ParentCondition.State), + Status: ParentStatus, + Reason: ParentCondition.Reason, + Message: ParentCondition.Message, + LastTransitionTime: metav1.Now(), + } + var newChildCondition *metav1.Condition + var c2 []byte + if ChildCondition != nil { + newChildCondition = &metav1.Condition{ + Type: string(ChildCondition.State), + Status: ChildStatus, + Reason: ChildCondition.Reason, + Message: ChildCondition.Message, + LastTransitionTime: metav1.Now(), + } + c2, _ = json.Marshal(&newChildCondition) + klog.Infof("new condition 1: %s", string(c2)) + } + + c1, _ := json.Marshal(newParentCondition) + klog.Infof("new condition 1: %s", string(c1)) + + // when do we transition back to false? and do we keep the previous comment? 
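// As implemented below: once the parent condition reaches MachineConfigNodeUpdated,
// every other condition that is not already False is reset to False, and its previous
// message is preserved behind the prefix "Action During Previous Update:".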
+ + reset := false + if newParentCondition.Type == string(mcfgalphav1.MachineConfigNodeUpdated) { + reset = true + } + + allConditionTypes := []mcfgalphav1.StateProgress{ + mcfgalphav1.MachineConfigNodeUpdatePrepared, + mcfgalphav1.MachineConfigNodeUpdateExecuted, + mcfgalphav1.MachineConfigNodeUpdatePostActionComplete, + mcfgalphav1.MachineConfigNodeUpdateComplete, + mcfgalphav1.MachineConfigNodeResumed, + mcfgalphav1.MachineConfigNodeUpdateCompatible, + mcfgalphav1.MachineConfigNodeUpdateDrained, + mcfgalphav1.MachineConfigNodeUpdateFilesAndOS, + mcfgalphav1.MachineConfigNodeUpdateCordoned, + mcfgalphav1.MachineConfigNodeUpdateRebooted, + mcfgalphav1.MachineConfigNodeUpdateReloaded, + mcfgalphav1.MachineConfigNodeUpdated, + } + // create all of the conditions, even the false ones + if newMCNode.Status.Conditions == nil { + newMCNode.Status.Conditions = []metav1.Condition{} + newMCNode.Status.Conditions = append(newMCNode.Status.Conditions, newParentCondition) + if newChildCondition != nil { + newMCNode.Status.Conditions = append(newMCNode.Status.Conditions, *newChildCondition) + } + for _, condType := range allConditionTypes { + found := false + for _, cond := range newMCNode.Status.Conditions { + // if this is one of our two conditions, do not nullify this + if condType == mcfgalphav1.StateProgress(cond.Type) { + found = true + } + } + // else if we do not have this one yet, set it to some sane default. + if !found { + newMCNode.Status.Conditions = append(newMCNode.Status.Conditions, + metav1.Condition{ + Type: string(condType), + Message: fmt.Sprintf("This node has not yet entered the %s phase", string(condType)), + Reason: "NotYetOccured", + LastTransitionTime: metav1.Now(), + Status: metav1.ConditionFalse, + }) + } + } + // else we already have some conditions. Lets update accordingly + } else { + foundChild := false + foundParent := false + // look through all of the conditions for our current ones, updat them accordingly + // also set all other ones to false and update last transition time. + for i, condition := range newMCNode.Status.Conditions { + if newChildCondition != nil && condition.Type == newChildCondition.Type { + foundChild = true + newChildCondition.DeepCopyInto(&condition) + } else if condition.Type == newParentCondition.Type { + foundParent = true + newParentCondition.DeepCopyInto(&condition) + } else if condition.Status != metav1.ConditionFalse && reset { + condition.Status = metav1.ConditionFalse + condition.Message = "Action During Previous Update:" + condition.Message + condition.LastTransitionTime = metav1.Now() + } + condition.DeepCopyInto(&newMCNode.Status.Conditions[i]) + } + // I don't think this'll happen given the above logic, but if somehow we do not have an entry for this yet, add one. 
+ if !foundChild && newChildCondition != nil { + newMCNode.Status.Conditions = append(newMCNode.Status.Conditions, *newChildCondition) + } + if !foundParent { + newMCNode.Status.Conditions = append(newMCNode.Status.Conditions, newParentCondition) + } + } + + newMCNode.Spec.ConfigVersion = mcfgalphav1.MachineConfigNodeSpecMachineConfigVersion{ + Desired: node.Annotations["machineconfiguration.openshift.io/desiredConfig"], + } + + newMCNode.Status.ConfigVersion = mcfgalphav1.MachineConfigNodeStatusMachineConfigVersion{ + Desired: newMCNode.Status.ConfigVersion.Desired, + Current: node.Annotations["machineconfiguration.openshift.io/currentConfig"], + } + if newChildCondition != nil && newChildCondition.Type == string(mcfgalphav1.MachineConfigNodeUpdateCompatible) && newChildCondition.Status == metav1.ConditionTrue { + newMCNode.Status.ConfigVersion.Desired = node.Annotations["machineconfiguration.openshift.io/desiredConfig"] + } + + if !needNewMCNode { + nodeRefApplyConfig := machineconfigurationalphav1.MCOObjectReference().WithName(newMCNode.Spec.Node.Name) + poolRefApplyConfig := machineconfigurationalphav1.MCOObjectReference().WithName(newMCNode.Spec.Pool.Name) + specconfigVersionApplyConfig := machineconfigurationalphav1.MachineConfigNodeSpecMachineConfigVersion().WithDesired(newMCNode.Spec.ConfigVersion.Desired) + statusconfigVersionApplyConfig := machineconfigurationalphav1.MachineConfigNodeStatusMachineConfigVersion().WithCurrent(newMCNode.Status.ConfigVersion.Current).WithDesired(newMCNode.Status.ConfigVersion.Desired) + statusApplyConfig := machineconfigurationalphav1.MachineConfigNodeStatus().WithConditions(newMCNode.Status.Conditions...).WithObservedGeneration(newMCNode.Generation + 1).WithConfigVersion(statusconfigVersionApplyConfig) + specApplyConfig := machineconfigurationalphav1.MachineConfigNodeSpec().WithNode(nodeRefApplyConfig).WithPool(poolRefApplyConfig).WithConfigVersion(specconfigVersionApplyConfig) + mcnodeApplyConfig := machineconfigurationalphav1.MachineConfigNode(newMCNode.Name).WithStatus(statusApplyConfig).WithSpec(specApplyConfig) + applyConfig, _ := json.Marshal(mcnodeApplyConfig) + klog.Infof("Updating Machine State Controller apply config Status to %s", string(applyConfig)) + ms, err := mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().ApplyStatus(context.TODO(), mcnodeApplyConfig, metav1.ApplyOptions{FieldManager: "machine-config-operator", Force: true}) + if err != nil { + klog.Errorf("ERROR: %w", err) + return err + } + m, err := json.Marshal(ms) + klog.Infof("MACHINECONFIGNODE: %s", string(m)) + } else { + // there are cases where we get here before the MCO has settled and applied all of the MCnodes. 
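// In that case the daemon creates the MachineConfigNode itself instead of waiting for
// the operator's syncMachineConfigNodes pass; the pool name is derived from the
// node-role.kubernetes.io/worker or node-role.kubernetes.io/master label, mirroring
// the operator-side logic.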
+ var pool string + var ok bool + if pool, ok = node.Labels["node-role.kubernetes.io/worker"]; ok { + pool = "worker" + } else if pool, ok = node.Labels["node-role.kubernetes.io/master"]; ok { + pool = "master" + } + + newMCNode.Name = node.Name + newMCNode.Spec.Pool = mcfgalphav1.MCOObjectReference{Name: pool} + newMCNode.Spec.Node = mcfgalphav1.MCOObjectReference{Name: node.Name} + ms, err := mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().Create(context.TODO(), newMCNode, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("ERROR: %w", err) + return err + } + m, err := json.Marshal(ms) + klog.Infof("MACHINECONFIGNODE: %s", string(m)) + } + return nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index dc2b7e51e6..4bce5936d5 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - con, key := findObject(doc, path) if con == nil { @@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - con, key := findObject(doc, path) if con == nil { diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml index 597bc9996f..b5ffbe03d8 100644 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -1,18 +1,19 @@ sudo: false language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master matrix: allow_failures: - go: master fast_finish: true - include: - - go: 1.10.x - - go: 1.11.x - env: GOFMT=1 - - go: master install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - go get -t -v ./... - - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi + - diff -u <(echo -n) <(gofmt -d .) - go tool vet . - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md index 51e7d23d09..09c9e7c173 100644 --- a/vendor/github.com/gregjones/httpcache/README.md +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -7,8 +7,6 @@ Package httpcache provides a http.RoundTripper implementation that works as a mo It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). -This project isn't actively maintained; it works for what I, and seemingly others, want to do with it, and I consider it "done". That said, if you find any issues, please open a Pull Request and I will try to review it. 
Any changes now that change the public API won't be considered. - Cache Backends -------------- @@ -21,8 +19,6 @@ Cache Backends - [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. - [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). -If you implement any other backend and wish it to be linked here, please send a PR editing this file. - License ------- diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index b41a63d1ff..f6a2ec4a53 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -416,14 +416,14 @@ func canStaleOnError(respHeaders, reqHeaders http.Header) bool { func getEndToEndHeaders(respHeaders http.Header) []string { // These headers are always hop-by-hop hopByHopHeaders := map[string]struct{}{ - "Connection": {}, - "Keep-Alive": {}, - "Proxy-Authenticate": {}, - "Proxy-Authorization": {}, - "Te": {}, - "Trailers": {}, - "Transfer-Encoding": {}, - "Upgrade": {}, + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, } for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { @@ -433,7 +433,7 @@ func getEndToEndHeaders(respHeaders http.Header) []string { } } endToEndHeaders := []string{} - for respHeader := range respHeaders { + for respHeader, _ := range respHeaders { if _, ok := hopByHopHeaders[respHeader]; !ok { endToEndHeaders = append(endToEndHeaders, respHeader) } diff --git a/vendor/github.com/openshift/api/Dockerfile.rhel8 b/vendor/github.com/openshift/api/Dockerfile.rhel8 index 62fa30b2c0..53c84a87b9 100644 --- a/vendor/github.com/openshift/api/Dockerfile.rhel8 +++ b/vendor/github.com/openshift/api/Dockerfile.rhel8 @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-8-golang-1.20-openshift-4.15 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.15 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.14:base +FROM registry.ci.openshift.org/ocp/4.15:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index 7cf29c2a84..9beee903a0 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -86,6 +86,7 @@ spec: - v4.12 - v4.13 - v4.14 + - v4.15 - vCurrent channel: description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. 
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml new file mode 100644 index 0000000000..b7954d79a6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml @@ -0,0 +1,219 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcProviders: + description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." 
+ type: array + maxItems: 1 + items: + type: object + required: + - issuer + - name + properties: + claimMappings: + description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity + type: object + properties: + groups: + description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. \n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." + type: string + username: + description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + type: object + required: + - prefixString + properties: + prefixString: + type: string + minLength: 1 + prefixPolicy: + description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. \n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" + type: string + enum: + - "" + - NoPrefix + - Prefix + x-kubernetes-validations: + - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' + message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise + claimValidationRules: + description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + type: array + items: + type: object + properties: + requiredClaim: + description: RequiredClaim allows configuring a required claim name and its expected value + type: object + required: + - claim + - requiredValue + properties: + claim: + description: Claim is a name of a required claim. Only claims with string values are supported. + type: string + minLength: 1 + requiredValue: + description: RequiredValue is the required value for the claim. 
+ type: string + minLength: 1 + type: + description: Type sets the type of the validation rule + type: string + default: RequiredClaim + enum: + - RequiredClaim + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes atributes of the OIDC token issuer + type: object + required: + - audiences + - issuerURL + properties: + audiences: + description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. + type: array + maxItems: 1 + items: + type: string + minLength: 1 + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. + type: string + pattern: ^https:\/\/[^\s] + name: + description: Name of the OIDC provider + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. 
It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + x-kubernetes-list-type: atomic + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml new file mode 100644 index 0000000000..e8047a40ba --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml @@ -0,0 +1,219 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcProviders: + description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." + type: array + maxItems: 1 + items: + type: object + required: + - issuer + - name + properties: + claimMappings: + description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity + type: object + properties: + groups: + description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. \n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." + type: string + username: + description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + type: object + required: + - prefixString + properties: + prefixString: + type: string + minLength: 1 + prefixPolicy: + description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. 
\n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" + type: string + enum: + - "" + - NoPrefix + - Prefix + x-kubernetes-validations: + - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' + message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise + claimValidationRules: + description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + type: array + items: + type: object + properties: + requiredClaim: + description: RequiredClaim allows configuring a required claim name and its expected value + type: object + required: + - claim + - requiredValue + properties: + claim: + description: Claim is a name of a required claim. Only claims with string values are supported. + type: string + minLength: 1 + requiredValue: + description: RequiredValue is the required value for the claim. + type: string + minLength: 1 + type: + description: Type sets the type of the validation rule + type: string + default: RequiredClaim + enum: + - RequiredClaim + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes atributes of the OIDC token issuer + type: object + required: + - audiences + - issuerURL + properties: + audiences: + description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. + type: array + maxItems: 1 + items: + type: string + minLength: 1 + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. + type: string + pattern: ^https:\/\/[^\s] + name: + description: Name of the OIDC provider + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. 
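The `x-kubernetes-validations` rule earlier in this hunk is the enforced invariant on the username claim mapping: with `prefixPolicy: Prefix` a non-empty `prefix.prefixString` is required, with any other policy `prefix` must be absent. A client-side sketch of the same invariant using the new Go types from this diff (the API server enforces it via CEL, so this is only illustrative):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// validateUsernameMapping mirrors the CEL rule on UsernameClaimMapping.
func validateUsernameMapping(m configv1.UsernameClaimMapping) error {
	if m.PrefixPolicy == configv1.Prefix {
		if m.Prefix == nil || m.Prefix.PrefixString == "" {
			return fmt.Errorf("prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise")
		}
		return nil
	}
	if m.Prefix != nil {
		return fmt.Errorf("prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise")
	}
	return nil
}

func main() {
	// Rejected combination: NoPrefix together with an explicit prefix, matching
	// the "Cannot set username claim prefix with policy NoPrefix" test case
	// added by this patch.
	m := configv1.UsernameClaimMapping{
		TokenClaimMapping: configv1.TokenClaimMapping{Claim: "preferred_username"},
		PrefixPolicy:      configv1.NoPrefix,
		Prefix:            &configv1.UsernamePrefix{PrefixString: "myoidc:"},
	}
	fmt.Println(validateUsernameMapping(m))
}
```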
This allows internal components to transition to use new service account issuer without service distruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + x-kubernetes-list-type: atomic + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml index facf7c6b09..b0cd9e67fc 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: authentications.config.openshift.io spec: group: config.openshift.io @@ -52,7 +53,7 @@ spec: description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. type: string webhookTokenAuthenticator: - description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." type: object required: - kubeConfig @@ -82,6 +83,7 @@ spec: name: description: name is the metadata.name of the referenced secret type: string + x-kubernetes-list-type: atomic status: description: status holds observed values from the cluster. They may not be overridden. type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml index 1b96b19c7e..32b7180026 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml @@ -268,6 +268,12 @@ spec: maxLength: 2048 pattern: ^/.*?/host/.*?/Resources.* type: string + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. \n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string required: - computeCluster - datacenter @@ -655,7 +661,7 @@ spec: description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. 
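The new vSphere failure-domain `template` field added above is validated only by length (1 to 2048 characters) and the inventory-path pattern `^/.*?/vm/.*?`. A small sketch of that shape check; the `dc1` datacenter segment is a placeholder assumption, since the description's example path elides it:

```go
package main

import (
	"fmt"
	"regexp"
)

// templatePathRE mirrors the kubebuilder pattern on the new vSphere
// failure-domain template field: an absolute inventory path with a /vm/ segment.
var templatePathRE = regexp.MustCompile(`^/.*?/vm/.*?`)

func validTemplatePath(p string) bool {
	return len(p) >= 1 && len(p) <= 2048 && templatePathRE.MatchString(p)
}

func main() {
	// "dc1" is an assumed datacenter name for illustration.
	fmt.Println(validTemplatePath("/dc1/vm/test-rhcos-region1-zonea")) // true
	fmt.Println(validTemplatePath("dc1/vm/test-rhcos-region1-zonea"))  // false: not an absolute path
}
```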
Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. maxLength: 63 minLength: 1 - pattern: ^[a-z][0-9a-z_-]+$ + pattern: ^[a-z][0-9a-z_-]{0,62}$ type: string x-kubernetes-validations: - message: label keys must not start with either `openshift-io` or `kubernetes-io` @@ -664,7 +670,7 @@ spec: description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. maxLength: 63 minLength: 1 - pattern: ^[0-9a-z_-]+$ + pattern: ^[0-9a-z_-]{1,63}$ type: string required: - key diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml index 1b84d0ae6f..af0a0737e0 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml @@ -268,6 +268,12 @@ spec: maxLength: 2048 pattern: ^/.*?/host/.*?/Resources.* type: string + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. \n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string required: - computeCluster - datacenter @@ -655,7 +661,7 @@ spec: description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. maxLength: 63 minLength: 1 - pattern: ^[a-z][0-9a-z_-]+$ + pattern: ^[a-z][0-9a-z_-]{0,62}$ type: string x-kubernetes-validations: - message: label keys must not start with either `openshift-io` or `kubernetes-io` @@ -664,7 +670,7 @@ spec: description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. 
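The label key and value patterns in these infrastructure CRD hunks change from unbounded `+` repetition to explicit `{0,62}` and `{1,63}` bounds, so the regular expression itself now enforces the 63-character limit in addition to `maxLength`. A quick Go check of the new bounds:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// New GCPResourceLabel patterns: key and value are capped at 63 characters
// by the regex itself, not just by maxLength.
var (
	labelKeyRE   = regexp.MustCompile(`^[a-z][0-9a-z_-]{0,62}$`)
	labelValueRE = regexp.MustCompile(`^[0-9a-z_-]{1,63}$`)
)

func main() {
	longKey := "a" + strings.Repeat("b", 63) // 64 characters
	fmt.Println(labelKeyRE.MatchString("team_a-1"), labelKeyRE.MatchString(longKey)) // true false
	fmt.Println(labelValueRE.MatchString("prod-42"), labelValueRE.MatchString(""))   // true false
}
```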
maxLength: 63 minLength: 1 - pattern: ^[0-9a-z_-]+$ + pattern: ^[0-9a-z_-]{1,63}$ type: string required: - key diff --git a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml new file mode 100644 index 0000000000..aceb3ebd65 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] Authentication" +crd: 0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml +tests: + onCreate: + - name: Should be able to create a minimal Authentication + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} # No spec is required for a Authentication + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 158487b5a8..cbc494e0ad 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -53,16 +53,6 @@ var ( OwningProduct: ocpSpecific, } - FeatureGateRetroactiveDefaultStorageClass = FeatureGateName("RetroactiveDefaultStorageClass") - retroactiveDefaultStorageClass = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateRetroactiveDefaultStorageClass, - }, - OwningJiraComponent: "storage", - ResponsiblePerson: "RomanBednar", - OwningProduct: kubernetes, - } - FeatureGateExternalCloudProvider = FeatureGateName("ExternalCloudProvider") externalCloudProvider = FeatureGateDescription{ FeatureGateAttributes: FeatureGateAttributes{ @@ -291,4 +281,44 @@ var ( ResponsiblePerson: "msluiter", OwningProduct: ocpSpecific, } + + FeatureGateDNSNameResolver = FeatureGateName("DNSNameResolver") + dnsNameResolver = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateDNSNameResolver, + }, + OwningJiraComponent: "dns", + ResponsiblePerson: "miciah", + OwningProduct: ocpSpecific, + } + + FeatureGateVSphereControlPlaneMachineset = FeatureGateName("VSphereControlPlaneMachineSet") + vSphereControlPlaneMachineset = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateVSphereControlPlaneMachineset, + }, + OwningJiraComponent: "splat", + ResponsiblePerson: "rvanderp3", + OwningProduct: ocpSpecific, + } + + FeatureGateMachineConfigNodes = FeatureGateName("MachineConfigNodes") + machineConfigNodes = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMachineConfigNodes, + }, + OwningJiraComponent: "MachineConfigOperator", + ResponsiblePerson: "cdoern", + OwningProduct: ocpSpecific, + } + + FeatureGateClusterAPIInstall = FeatureGateName("ClusterAPIInstall") + clusterAPIInstall = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateClusterAPIInstall, + }, + OwningJiraComponent: "Installer", + ResponsiblePerson: "vincepri", + OwningProduct: ocpSpecific, + } ) diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml new file mode 100644 index 0000000000..b29790dbd6 --- /dev/null +++ 
b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml @@ -0,0 +1,110 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] Authentication" +crd: 0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml +tests: + onCreate: + - name: Should be able to create a minimal Authentication + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} # No spec is required for a Authentication + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} + - name: Cannot set username claim prefix with policy NoPrefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + prefix: + prefixString: "myoidc:" + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set username claim prefix with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + - name: Cannot leave username claim prefix blank with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set OIDC providers with no username prefixing + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index dd2ef6e0ae..72c346ce11 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -4,6 +4,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced +// +kubebuilder:subresource:status // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Authentication specifies cluster-wide settings for authentication (like OAuth and @@ -50,12 +51,16 @@ type AuthenticationSpec struct { 
OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + // +listType=atomic WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` // webhookTokenAuthenticator configures a remote token reviewer. // These remote authentication webhooks can be used to verify bearer tokens // via the tokenreviews.authentication.k8s.io REST API. This is required to // honor bearer tokens that are provisioned by an external authentication service. + // + // Can only be set if "Type" is set to "None". + // // +optional WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` @@ -69,6 +74,18 @@ type AuthenticationSpec struct { // This allows internal components to transition to use new service account issuer without service distruption. // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` + + // OIDCProviders are OIDC identity providers that can issue tokens + // for this cluster + // Can only be set if "Type" is set to "OIDC". + // + // At most one provider can be configured. + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=1 + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } type AuthenticationStatus struct { @@ -110,15 +127,17 @@ type AuthenticationType string const ( // None means that no cluster managed authentication system is in place. // Note that user login will only work if a manually configured system is in place and - // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators. + // referenced in authentication spec via oauthMetadata and + // webhookTokenAuthenticator/oidcProviders AuthenticationTypeNone AuthenticationType = "None" // IntegratedOAuth refers to the cluster managed OAuth server. // It is configured via the top level OAuth config. AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" - // TODO if we add support for an in-cluster operator managed Keycloak instance - // AuthenticationTypeKeycloak AuthenticationType = "Keycloak" + // AuthenticationTypeOIDC refers to a configuration with an external + // OIDC server configured directly with the kube-apiserver. + AuthenticationTypeOIDC AuthenticationType = "OIDC" ) // deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. @@ -159,3 +178,177 @@ const ( // KubeConfigKey is the key for the kube config file data in a secret KubeConfigKey = "kubeConfig" ) + +type OIDCProvider struct { + // Name of the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + // Issuer describes atributes of the OIDC token issuer + // + // +kubebuilder:validation:Required + // +required + Issuer TokenIssuer `json:"issuer"` + + // ClaimMappings describes rules on how to transform information from an + // ID token into a cluster identity + ClaimMappings TokenClaimMappings `json:"claimMappings"` + + // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + // + // +listType=atomic + ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type TokenAudience string + +type TokenIssuer struct { + // URL is the serving URL of the token issuer. 
+ // Must use the https:// scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:Required + // +required + URL string `json:"issuerURL"` + + // Audiences is an array of audiences that the token was issued for. + // Valid tokens must include at least one of these values in their + // "aud" claim. + // Must be set to exactly one value. + // + // +listType=set + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxItems=1 + // +required + Audiences []TokenAudience `json:"audiences"` + + // CertificateAuthority is a reference to a config map in the + // configuration namespace. The .data of the configMap must contain + // the "ca-bundle.crt" key. + // If unset, system trust is used instead. + CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` +} + +type TokenClaimMappings struct { + // Username is a name of the claim that should be used to construct + // usernames for the cluster identity. + // + // Default value: "sub" + Username UsernameClaimMapping `json:"username,omitempty"` + + // Groups is a name of the claim that should be used to construct + // groups for the cluster identity. + // The referenced claim must use array of strings values. + Groups PrefixedClaimMapping `json:"groups,omitempty"` +} + +type TokenClaimMapping struct { + // Claim is a JWT token claim to be used in the mapping + // + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +type UsernameClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // PrefixPolicy specifies how a prefix should apply. + // + // By default, claims other than `email` will be prefixed with the issuer URL to + // prevent naming clashes with other plugins. + // + // Set to "NoPrefix" to disable prefixing. + // + // Example: + // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". + // If the JWT claim `username` contains value `userA`, the resulting + // mapped value will be "myoidc:userA". + // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the + // JWT `email` claim contains value "userA@myoidc.tld", the resulting + // mapped value will be "myoidc:userA@myoidc.tld". + // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // (a) "username": the mapped value will be "https://myoidc.tld#userA" + // (b) "email": the mapped value will be "userA@myoidc.tld" + // + // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` + + Prefix *UsernamePrefix `json:"prefix"` +} + +type UsernamePrefixPolicy string + +var ( + // NoOpinion let's the cluster assign prefixes. If the username claim is email, there is no prefix + // If the username claim is anything else, it is prefixed by the issuerURL + NoOpinion UsernamePrefixPolicy = "" + + // NoPrefix means the username claim value will not have any prefix + NoPrefix UsernamePrefixPolicy = "NoPrefix" + + // Prefix means the prefix value must be specified. 
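The PrefixPolicy documentation above spells out the default mapping behaviour: with the empty (NoOpinion) policy the `email` claim is left alone while any other claim is prefixed with the issuer URL and `#`. The sketch below only restates those documented examples as code; the actual mapping is performed by the authenticator, not by anything in this patch:

```go
package main

import "fmt"

// mapUsername restates the documented prefixing examples for the three
// policies: "" (NoOpinion), "NoPrefix" and "Prefix".
func mapUsername(policy, prefix, issuerURL, claim, value string) string {
	switch policy {
	case "Prefix":
		return prefix + value
	case "NoPrefix":
		return value
	default: // NoOpinion: email stays as-is, other claims get "<issuerURL>#"
		if claim == "email" {
			return value
		}
		return issuerURL + "#" + value
	}
}

func main() {
	fmt.Println(mapUsername("Prefix", "myoidc:", "", "username", "userA"))              // myoidc:userA
	fmt.Println(mapUsername("", "", "https://myoidc.tld", "username", "userA"))         // https://myoidc.tld#userA
	fmt.Println(mapUsername("", "", "https://myoidc.tld", "email", "userA@myoidc.tld")) // userA@myoidc.tld
}
```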
It cannot be empty + Prefix UsernamePrefixPolicy = "Prefix" +) + +type UsernamePrefix struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + PrefixString string `json:"prefixString"` +} + +type PrefixedClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // Prefix is a string to prefix the value from the token in the result of the + // claim mapping. + // + // By default, no prefixing occurs. + // + // Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains + // an array of strings "a", "b" and "c", the mapping will result in an + // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Prefix string `json:"prefix"` +} + +type TokenValidationRuleType string + +const ( + TokenValidationRuleTypeRequiredClaim = "RequiredClaim" +) + +type TokenClaimValidationRule struct { + // Type sets the type of the validation rule + // + // +kubebuilder:validation:Enum={"RequiredClaim"} + // +kubebuilder:default="RequiredClaim" + Type TokenValidationRuleType `json:"type"` + + // RequiredClaim allows configuring a required claim name and its expected + // value + RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` +} + +type TokenRequiredClaim struct { + // Claim is a name of a required claim. Only claims with string values are + // supported. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` + + // RequiredValue is the required value for the claim. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + RequiredValue string `json:"requiredValue"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index a9bade6fe7..e5a03bac71 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -363,7 +363,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ } // ClusterVersionCapabilitySet defines sets of cluster version capabilities. -// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;vCurrent +// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;vCurrent type ClusterVersionCapabilitySet string const ( @@ -395,6 +395,12 @@ const ( // version of OpenShift is installed. ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" + // ClusterVersionCapabilitySet4_15 is the recommended set of + // optional capabilities to enable for the 4.15 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15" + // ClusterVersionCapabilitySetCurrent is the recommended set // of optional capabilities to enable for the cluster's // current version of OpenShift. 
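Putting the new `types_authentication.go` types together, the object below is the Go equivalent of the "Can set username claim prefix with policy Prefix" case in the TechPreviewNoUpgrade testsuite added by this patch; only the wrapping in a small program is added here:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Mirrors the testsuite case: one OIDC provider with an explicit username prefix.
	auth := configv1.Authentication{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.AuthenticationSpec{
			Type: configv1.AuthenticationTypeOIDC,
			OIDCProviders: []configv1.OIDCProvider{{
				Name: "myoidc",
				Issuer: configv1.TokenIssuer{
					URL:       "https://meh.tld",
					Audiences: []configv1.TokenAudience{"openshift-aud"},
				},
				ClaimMappings: configv1.TokenClaimMappings{
					Username: configv1.UsernameClaimMapping{
						TokenClaimMapping: configv1.TokenClaimMapping{Claim: "preferred_username"},
						PrefixPolicy:      configv1.Prefix,
						Prefix:            &configv1.UsernamePrefix{PrefixString: "myoidc:"},
					},
				},
			}},
		},
	}
	fmt.Println(auth.Spec.OIDCProviders[0].Issuer.URL)
}
```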
@@ -445,6 +451,21 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, }, + ClusterVersionCapabilitySet4_15: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + }, ClusterVersionCapabilitySetCurrent: { ClusterVersionCapabilityBaremetal, ClusterVersionCapabilityConsole, diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 149dd1194d..363ae65444 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -168,7 +168,6 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ with(nodeSwap). with(machineAPIProviderOpenStack). with(insightsConfigAPI). - with(retroactiveDefaultStorageClass). with(dynamicResourceAllocation). with(gateGatewayAPI). with(maxUnavailableStatefulSet). @@ -178,8 +177,12 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ with(vSphereStaticIPs). with(routeExternalCertificate). with(automatedEtcdBackup). + with(vSphereControlPlaneMachineset). without(machineAPIOperatorDisableMachineHealthCheckController). with(adminNetworkPolicy). + with(dnsNameResolver). + with(machineConfigNodes). + with(clusterAPIInstall). toFeatures(defaultFeatures), LatencySensitive: newDefaultFeatures(). toFeatures(defaultFeatures), @@ -198,9 +201,7 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ privateHostedZoneAWS, buildCSIVolumes, }, - Disabled: []FeatureGateDescription{ - retroactiveDefaultStorageClass, - }, + Disabled: []FeatureGateDescription{}, } type featureSetBuilder struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 18d36519d1..510bb01a14 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -622,7 +622,7 @@ type GCPResourceLabel struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]+$` + // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` Key string `json:"key"` // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. @@ -630,7 +630,7 @@ type GCPResourceLabel struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[0-9a-z_-]+$` + // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` Value string `json:"value"` } @@ -1010,6 +1010,22 @@ type VSpherePlatformTopology struct { // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` // +optional Folder string `json:"folder,omitempty"` + + // template is the full inventory path of the virtual machine or template + // that will be cloned when creating new machines in this failure domain. 
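The `types_feature.go` hunk above wires the new gates (VSphereControlPlaneMachineSet, DNSNameResolver, MachineConfigNodes, ClusterAPIInstall) into the TechPreviewNoUpgrade set and drops RetroactiveDefaultStorageClass from the default Disabled list. A small sketch that inspects the exported `FeatureSets` map to confirm a gate is part of a given feature set:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// techPreviewEnables reports whether a gate is listed as enabled in the
// TechPreviewNoUpgrade feature set, which this diff extends.
func techPreviewEnables(name configv1.FeatureGateName) bool {
	for _, fg := range configv1.FeatureSets[configv1.TechPreviewNoUpgrade].Enabled {
		if fg.FeatureGateAttributes.Name == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(techPreviewEnables(configv1.FeatureGateMachineConfigNodes))
}
```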
+ // The maximum length of the path is 2048 characters. + // + // When omitted, the template will be calculated by the control plane + // machineset operator based on the region and zone defined in + // VSpherePlatformFailureDomainSpec. + // For example, for zone=zonea, region=region1, and infrastructure name=test, + // the template path would be calculated as //vm/test-rhcos-region1-zonea. + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` + // +optional + Template string `json:"template,omitempty"` } // VSpherePlatformVCenterSpec stores the vCenter connection fields. @@ -1209,13 +1225,13 @@ type IBMCloudPlatformStatus struct { // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` - // serviceEndpoints is a list of custom endpoints which will override the default - // service endpoints of an IBM Cloud service. These endpoints are consumed by + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of an IBM Cloud service. These endpoints are consumed by // components within the cluster to reach the respective IBM Cloud Services. - // +listType=map - // +listMapKey=name - // +optional - ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` } // KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 63b9f050d0..11a612240d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -522,6 +522,13 @@ func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { *out = new(WebhookTokenAuthenticator) **out = **in } + if in.OIDCProviders != nil { + in, out := &in.OIDCProviders, &out.OIDCProviders + *out = make([]OIDCProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3952,6 +3959,31 @@ func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]TokenClaimValidationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider. +func (in *OIDCProvider) DeepCopy() *OIDCProvider { + if in == nil { + return nil + } + out := new(OIDCProvider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in @@ -4526,6 +4558,23 @@ func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping. +func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping { + if in == nil { + return nil + } + out := new(PrefixedClaimMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Project) DeepCopyInto(out *Project) { *out = *in @@ -5160,6 +5209,61 @@ func (in *TemplateReference) DeepCopy() *TemplateReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping. +func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping { + if in == nil { + return nil + } + out := new(TokenClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + out.Groups = in.Groups + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. +func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { + if in == nil { + return nil + } + out := new(TokenClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { + *out = *in + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = new(TokenRequiredClaim) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. +func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { *out = *in @@ -5181,6 +5285,44 @@ func (in *TokenConfig) DeepCopy() *TokenConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]TokenAudience, len(*in)) + copy(*out, *in) + } + out.CertificateAuthority = in.CertificateAuthority + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. 
+func (in *TokenIssuer) DeepCopy() *TokenIssuer { + if in == nil { + return nil + } + out := new(TokenIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. +func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { + if in == nil { + return nil + } + out := new(TokenRequiredClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Update) DeepCopyInto(out *Update) { *out = *in @@ -5218,6 +5360,44 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(UsernamePrefix) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. +func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { + if in == nil { + return nil + } + out := new(UsernameClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix. +func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { + if in == nil { + return nil + } + out := new(UsernamePrefix) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 048c37b16f..7977078291 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -367,8 +367,9 @@ var map_AuthenticationSpec = map[string]string{ "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. 
If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", - "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.", + "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.", + "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -392,6 +393,78 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { return map_DeprecatedWebhookTokenAuthenticator } +var map_OIDCProvider = map[string]string{ + "name": "Name of the OIDC provider", + "issuer": "Issuer describes atributes of the OIDC token issuer", + "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", + "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", +} + +func (OIDCProvider) SwaggerDoc() map[string]string { + return map_OIDCProvider +} + +var map_PrefixedClaimMapping = map[string]string{ + "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", +} + +func (PrefixedClaimMapping) SwaggerDoc() map[string]string { + return map_PrefixedClaimMapping +} + +var map_TokenClaimMapping = map[string]string{ + "claim": "Claim is a JWT token claim to be used in the mapping", +} + +func (TokenClaimMapping) SwaggerDoc() map[string]string { + return map_TokenClaimMapping +} + +var map_TokenClaimMappings = map[string]string{ + "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", + "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. 
The referenced claim must use array of strings values.", +} + +func (TokenClaimMappings) SwaggerDoc() map[string]string { + return map_TokenClaimMappings +} + +var map_TokenClaimValidationRule = map[string]string{ + "type": "Type sets the type of the validation rule", + "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", +} + +func (TokenClaimValidationRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationRule +} + +var map_TokenIssuer = map[string]string{ + "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", + "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", + "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", +} + +func (TokenIssuer) SwaggerDoc() map[string]string { + return map_TokenIssuer +} + +var map_TokenRequiredClaim = map[string]string{ + "claim": "Claim is a name of a required claim. Only claims with string values are supported.", + "requiredValue": "RequiredValue is the required value for the claim.", +} + +func (TokenRequiredClaim) SwaggerDoc() map[string]string { + return map_TokenRequiredClaim +} + +var map_UsernameClaimMapping = map[string]string{ + "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", +} + +func (UsernameClaimMapping) SwaggerDoc() map[string]string { + return map_UsernameClaimMapping +} + var map_WebhookTokenAuthenticator = map[string]string{ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", @@ -1636,6 +1709,7 @@ var map_VSpherePlatformTopology = map[string]string{ "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. 
The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters.", "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters.", "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters.", + "template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea.", } func (VSpherePlatformTopology) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index d7668b3c03..cc91150009 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -26,7 +26,6 @@ import ( keventsv1 "k8s.io/api/events/v1" keventsv1beta1 "k8s.io/api/events/v1beta1" kextensionsv1beta1 "k8s.io/api/extensions/v1beta1" - kflowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" kflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" kflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" @@ -143,7 +142,6 @@ var ( keventsv1.AddToScheme, keventsv1beta1.AddToScheme, kextensionsv1beta1.AddToScheme, - kflowcontrolv1alpha1.AddToScheme, kflowcontrolv1beta1.AddToScheme, kflowcontrolv1beta2.AddToScheme, kimagepolicyv1alpha1.AddToScheme, diff --git a/vendor/github.com/openshift/api/machine/.codegen.yaml b/vendor/github.com/openshift/api/machine/.codegen.yaml index ffa2c8d9b2..e799336feb 100644 --- a/vendor/github.com/openshift/api/machine/.codegen.yaml +++ b/vendor/github.com/openshift/api/machine/.codegen.yaml @@ -1,2 +1,8 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" + - "CustomNoUpgrade" swaggerdocs: commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..fefe032537 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml @@ -0,0 +1,584 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + creationTimestamp: null + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: 
.spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + type: object + required: + - replicas + - selector + - template + properties: + replicas: + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + type: integer + format: int32 + default: 3 + enum: + - 3 + - 5 + x-kubernetes-validations: + - rule: self == oldSelf + message: replicas is immutable + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - rule: self == oldSelf + message: selector is immutable + state: + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. + type: string + default: Inactive + enum: + - Active + - Inactive + x-kubernetes-validations: + - rule: oldSelf != 'Active' || self == oldSelf + message: state cannot be changed once Active + strategy: + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + type: object + default: + type: RollingUpdate + properties: + type: + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + type: string + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + type: object + required: + - machineType + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + type: string + enum: + - machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + type: object + required: + - metadata + - spec + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + type: object + required: + - platform + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + type: array + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + type: object + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. + type: object + required: + - availabilityZone + properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + subnet: + description: Subnet is a reference to the subnet to use for this instance. + type: object + required: + - type + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. 
+ type: array + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + type: object + required: + - name + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + type: array + items: + type: string + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. + type: string + enum: + - ID + - ARN + - Filters + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + message: id is required when type is ID, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''ARN'' ? has(self.arn) : !has(self.arn)' + message: arn is required when type is ARN, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''Filters'' ? has(self.filters) : !has(self.filters)' + message: filters is required when type is Filters, and forbidden otherwise + azure: + description: Azure configures failure domain information for the Azure platform. + type: array + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + type: object + required: + - zone + properties: + subnet: + description: subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. + type: string + maxLength: 80 + pattern: ^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$ + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + gcp: + description: GCP configures failure domain information for the GCP platform. + type: array + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + type: object + required: + - zone + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + type: array + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + type: object + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + type: string + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + type: object + required: + - volumeType + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. 
If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + type: string + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. + type: string + maxLength: 255 + minLength: 1 + x-kubernetes-validations: + - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + message: rootVolume.availabilityZone is required when availabilityZone is set + platform: + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, and VSphere. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + vsphere: + description: vsphere configures failure domain information for the VSphere platform. + type: array + items: + description: VSphereFailureDomain configures failure domain information for the vSphere platform + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + type: string + x-kubernetes-validations: + - rule: 'has(self.platform) && self.platform == ''VSphere'' ? has(self.vsphere) : !has(self.vsphere)' + message: vsphere configuration is required when platform is VSphere, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + message: azure configuration is required when platform is Azure, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + message: gcp configuration is required when platform is GCP, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? 
has(self.openstack) : !has(self.openstack)' + message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + metadata: + description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + type: object + required: + - labels + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' + type: object + additionalProperties: + type: string + x-kubernetes-validations: + - rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-cluster'' in self' + message: label 'machine.openshift.io/cluster-api-cluster' is required + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. 
This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. 
+ type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + x-kubernetes-validations: + - rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. + type: object + properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + type: integer + format: int64 + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + type: integer + format: int32 + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + type: integer + format: int32 + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + type: integer + format: int32 + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. 
+ type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml rename to vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml index 97c0ae6009..c9dcfd4d01 100644 --- a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml @@ -284,7 +284,7 @@ spec: - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' message: rootVolume.availabilityZone is required when availabilityZone is set platform: - description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, and GCP. + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, and VSphere. type: string enum: - "" diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..7dfec9c689 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,584 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + creationTimestamp: null + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. 
Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + type: object + required: + - replicas + - selector + - template + properties: + replicas: + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + type: integer + format: int32 + default: 3 + enum: + - 3 + - 5 + x-kubernetes-validations: + - rule: self == oldSelf + message: replicas is immutable + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - rule: self == oldSelf + message: selector is immutable + state: + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. 
To prevent further action please remove the ControlPlaneMachineSet. + type: string + default: Inactive + enum: + - Active + - Inactive + x-kubernetes-validations: + - rule: oldSelf != 'Active' || self == oldSelf + message: state cannot be changed once Active + strategy: + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + type: object + default: + type: RollingUpdate + properties: + type: + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + type: string + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + type: object + required: + - machineType + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + type: string + enum: + - machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + type: object + required: + - metadata + - spec + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + type: object + required: + - platform + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + type: array + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + type: object + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. + type: object + required: + - availabilityZone + properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + subnet: + description: Subnet is a reference to the subnet to use for this instance. + type: object + required: + - type + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. + type: array + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + type: object + required: + - name + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + type: array + items: + type: string + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. + type: string + enum: + - ID + - ARN + - Filters + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + message: id is required when type is ID, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''ARN'' ? 
has(self.arn) : !has(self.arn)' + message: arn is required when type is ARN, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''Filters'' ? has(self.filters) : !has(self.filters)' + message: filters is required when type is Filters, and forbidden otherwise + azure: + description: Azure configures failure domain information for the Azure platform. + type: array + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + type: object + required: + - zone + properties: + subnet: + description: subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. + type: string + maxLength: 80 + pattern: ^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$ + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + gcp: + description: GCP configures failure domain information for the GCP platform. + type: array + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + type: object + required: + - zone + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + type: array + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + type: object + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + type: string + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + type: object + required: + - volumeType + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). 
Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + type: string + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. + type: string + maxLength: 255 + minLength: 1 + x-kubernetes-validations: + - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + message: rootVolume.availabilityZone is required when availabilityZone is set + platform: + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, and VSphere. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + vsphere: + description: vsphere configures failure domain information for the VSphere platform. + type: array + items: + description: VSphereFailureDomain configures failure domain information for the vSphere platform + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + type: string + x-kubernetes-validations: + - rule: 'has(self.platform) && self.platform == ''VSphere'' ? has(self.vsphere) : !has(self.vsphere)' + message: vsphere configuration is required when platform is VSphere, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + message: azure configuration is required when platform is Azure, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + message: gcp configuration is required when platform is GCP, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' + message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + metadata: + description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + type: object + required: + - labels + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' + type: object + additionalProperties: + type: string + x-kubernetes-validations: + - rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-cluster'' in self' + message: label 'machine.openshift.io/cluster-api-cluster' is required + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. 
The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. 
if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + x-kubernetes-validations: + - rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. + type: object + properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + type: integer + format: int64 + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + type: integer + format: int32 + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + type: integer + format: int32 + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + type: integer + format: int32 + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. 
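Aside (not part of the patch above): the .status.conditions list in this CRD follows the standard metav1.Condition shape, with Available, Degraded and Progressing as the known types, so consumers can rely on the apimachinery condition helpers rather than scanning the list by hand. The sketch below is a minimal illustration under that assumption; the function name and the sample Reason value are invented for the example.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// cpmsIsAvailable reports whether the Available condition in a list of
// metav1.Condition values (for example a ControlPlaneMachineSet's
// .status.conditions as described in the schema above) is currently True.
func cpmsIsAvailable(conditions []metav1.Condition) bool {
	cond := meta.FindStatusCondition(conditions, "Available")
	return cond != nil && cond.Status == metav1.ConditionTrue
}

func main() {
	conditions := []metav1.Condition{
		{Type: "Available", Status: metav1.ConditionTrue, Reason: "AllReplicasAvailable"},
	}
	fmt.Println(cpmsIsAvailable(conditions)) // prints: true
}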
+ type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml new file mode 100644 index 0000000000..350be2bf31 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should reject a VSphere platform failure domain without any VSphere config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: VSphere + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": vsphere configuration is required when platform is VSphere" + - name: Should reject a VSphere configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + vsphere: + - name: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml index 07a5ec7c13..f599fcc2db 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet (AWS)" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an AWS platform failure domain without any AWS config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml index 191bf65f2e..6b6295e536 
100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an Azure platform failure domain without any Azure config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml index 518625f915..24e617286c 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an GCP platform failure domain without any GCP config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml index a09de51e0f..ae65d8f7f4 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an OpenStack platform failure domain without any OpenStack config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml index 3e65b31f64..dd4f09ccf6 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal ControlPlaneMachineSet diff --git a/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml new file mode 100644 index 0000000000..519b0cad2c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should reject an 
VSphere platform failure domain without any VSphere config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: VSphere + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": vsphere configuration is required when platform is VSphere" + - name: Should reject an VSphere configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + vsphere: + - name: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index e46c39b552..5322e72e0c 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -138,7 +138,7 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // This will be merged into the ProviderSpec given in the template. // This field is optional on platforms that do not require placement information. // +optional - FailureDomains FailureDomains `json:"failureDomains,omitempty"` + FailureDomains *FailureDomains `json:"failureDomains,omitempty"` // ObjectMeta is the standard object metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -231,9 +231,10 @@ const ( // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Azure' ? has(self.azure) : !has(self.azure)",message="azure configuration is required when platform is Azure, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'GCP' ? has(self.gcp) : !has(self.gcp)",message="gcp configuration is required when platform is GCP, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'OpenStack' ? has(self.openstack) : !has(self.openstack)",message="openstack configuration is required when platform is OpenStack, and forbidden otherwise" +// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="has(self.platform) && self.platform == 'VSphere' ? 
has(self.vsphere) : !has(self.vsphere)",message="vsphere configuration is required when platform is VSphere, and forbidden otherwise" type FailureDomains struct { // Platform identifies the platform for which the FailureDomain represents. - // Currently supported values are AWS, Azure, and GCP. + // Currently supported values are AWS, Azure, GCP, OpenStack, and VSphere. // +unionDiscriminator // +kubebuilder:validation:Required Platform configv1.PlatformType `json:"platform"` @@ -250,6 +251,11 @@ type FailureDomains struct { // +optional GCP *[]GCPFailureDomain `json:"gcp,omitempty"` + // vsphere configures failure domain information for the VSphere platform. + // +optional + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` + // OpenStack configures failure domain information for the OpenStack platform. // +optional // @@ -303,6 +309,16 @@ type GCPFailureDomain struct { Zone string `json:"zone"` } +// VSphereFailureDomain configures failure domain information for the vSphere platform +type VSphereFailureDomain struct { + // name of the failure domain in which the vSphere machine provider will create the VM. + // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + // When balancing machines across failure domains, the control plane machine set will inject configuration from the + // Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + // +kubebuilder:validation:Required + Name string `json:"name"` +} + // OpenStackFailureDomain configures failure domain information for the OpenStack platform. // +kubebuilder:validation:MinProperties:=1 // +kubebuilder:validation:XValidation:rule="!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)",message="rootVolume.availabilityZone is required when availabilityZone is set" diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go index ca31843272..507c6cbd59 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go @@ -530,6 +530,11 @@ func (in *FailureDomains) DeepCopyInto(out *FailureDomains) { copy(*out, *in) } } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = make([]VSphereFailureDomain, len(*in)) + copy(*out, *in) + } if in.OpenStack != nil { in, out := &in.OpenStack, &out.OpenStack *out = make([]OpenStackFailureDomain, len(*in)) @@ -717,7 +722,11 @@ func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopyInto(out *OpenShiftMachineV1Beta1MachineTemplate) { *out = *in - in.FailureDomains.DeepCopyInto(&out.FailureDomains) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = new(FailureDomains) + (*in).DeepCopyInto(*out) + } in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) return @@ -940,3 +949,19 @@ func (in *Tag) DeepCopy() *Tag { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
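Aside (not part of the patch): a minimal Go sketch of how a consumer of the updated machine/v1 API might populate the new vSphere failure domains, now that FailureDomains is an optional pointer on OpenShiftMachineV1Beta1MachineTemplate. Per the markers above, the vsphere field is only served under the CustomNoUpgrade or TechPreviewNoUpgrade feature sets, and the CEL rule ties it to platform VSphere. The helper name, import aliases, and failure-domain names are illustrative assumptions; the packages are the vendored github.com/openshift/api modules touched in this diff.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
)

// vSphereFailureDomains builds the optional, pointer-typed FailureDomains
// value, spreading control plane machines across the named vSphere failure
// domains defined in the cluster's Infrastructure resource.
func vSphereFailureDomains(names ...string) *machinev1.FailureDomains {
	domains := make([]machinev1.VSphereFailureDomain, 0, len(names))
	for _, name := range names {
		domains = append(domains, machinev1.VSphereFailureDomain{Name: name})
	}
	return &machinev1.FailureDomains{
		// The validation rule above requires platform to be VSphere whenever
		// the vsphere list is populated, and forbids the list otherwise.
		Platform: configv1.VSpherePlatformType,
		VSphere:  domains,
	}
}

func main() {
	fd := vSphereFailureDomains("fd-east-1a", "fd-east-1b", "fd-east-1c")
	fmt.Println(fd.Platform, len(fd.VSphere))
}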
+func (in *VSphereFailureDomain) DeepCopyInto(out *VSphereFailureDomain) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomain. +func (in *VSphereFailureDomain) DeepCopy() *VSphereFailureDomain { + if in == nil { + return nil + } + out := new(VSphereFailureDomain) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index 6406220e7d..4f710ff652 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -244,10 +244,11 @@ func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { var map_FailureDomains = map[string]string{ "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.", - "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, and GCP.", + "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, and VSphere.", "aws": "AWS configures failure domain information for the AWS platform.", "azure": "Azure configures failure domain information for the Azure platform.", "gcp": "GCP configures failure domain information for the GCP platform.", + "vsphere": "vsphere configures failure domain information for the VSphere platform.", "openstack": "OpenStack configures failure domain information for the OpenStack platform.", } @@ -295,6 +296,15 @@ func (RootVolume) SwaggerDoc() map[string]string { return map_RootVolume } +var map_VSphereFailureDomain = map[string]string{ + "": "VSphereFailureDomain configures failure domain information for the vSphere platform", + "name": "name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain.", +} + +func (VSphereFailureDomain) SwaggerDoc() map[string]string { + return map_VSphereFailureDomain +} + var map_NutanixCategory = map[string]string{ "": "NutanixCategory identifies a pair of prism category key and value", "key": "key is the prism category key name", diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go index e3dd4d0a02..da5fbc5152 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go @@ -92,6 +92,12 @@ type OpenstackProviderSpec struct { // The volume metadata to boot from RootVolume *RootVolume `json:"rootVolume,omitempty"` + // additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance + // +optional + // +listType=map + // +listMapKey=name + AdditionalBlockDevices []AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"` + // The server group to assign the machine to. 
ServerGroupID string `json:"serverGroupID,omitempty"` @@ -366,3 +372,68 @@ type RootVolume struct { // Deprecated: deviceType will be silently ignored. There is no replacement. DeprecatedDeviceType string `json:"deviceType,omitempty"` } + +// blockDeviceStorage is the storage type of a block device to create and +// contains additional storage options. +// +union +type BlockDeviceStorage struct { + // type is the type of block device to create. + // This can be either "Volume" or "Local". + // +kubebuilder:validation:Required + // +unionDiscriminator + Type BlockDeviceType `json:"type"` + + // volume contains additional storage options for a volume block device. + // +optional + // +unionMember,optional + Volume *BlockDeviceVolume `json:"volume,omitempty"` +} + +// blockDeviceVolume contains additional storage options for a volume block device. +type BlockDeviceVolume struct { + // type is the Cinder volume type of the volume. + // If omitted, the default Cinder volume type that is configured in the OpenStack cloud + // will be used. + // +optional + Type string `json:"type,omitempty"` + + // availabilityZone is the volume availability zone to create the volume in. + // If omitted, the availability zone of the server will be used. + // The availability zone must NOT contain spaces otherwise it will lead to volume that belongs + // to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for + // further information. + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` +} + +// additionalBlockDevice is a block device to attach to the server. +type AdditionalBlockDevice struct { + // name of the block device in the context of a machine. + // If the block device is a volume, the Cinder volume will be named + // as a combination of the machine name and this name. + // Also, this name will be used for tagging the block device. + // Information about the block device tag can be obtained from the OpenStack + // metadata API or the config drive. + // +kubebuilder:validation:Required + Name string `json:"name"` + + // sizeGiB is the size of the block device in gibibytes (GiB). + // +kubebuilder:validation:Required + SizeGiB int `json:"sizeGiB"` + + // storage specifies the storage type of the block device and + // additional storage options. + // +kubebuilder:validation:Required + Storage BlockDeviceStorage `json:"storage"` +} + +// BlockDeviceType defines the type of block device to create. +type BlockDeviceType string + +const ( + // LocalBlockDevice is an ephemeral block device attached to the server. + LocalBlockDevice BlockDeviceType = "Local" + + // VolumeBlockDevice is a volume block device attached to the server. + VolumeBlockDevice BlockDeviceType = "Volume" +) diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go index 7210713e38..f61b35ab44 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go @@ -10,6 +10,23 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
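Aside (not part of the patch): the new additionalBlockDevices field on OpenstackProviderSpec takes a list of AdditionalBlockDevice entries, each naming a device, giving its size in GiB, and selecting either Volume- or Local-backed storage through the BlockDeviceStorage union. The sketch below shows one such entry using only the types added above; the device name, size, and availability zone are illustrative values, not recommendations.

package main

import (
	"fmt"

	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
)

// etcdBlockDevice sketches one entry for OpenstackProviderSpec's new
// additionalBlockDevices list: a named, 10 GiB Cinder-backed volume placed in
// an explicit availability zone.
func etcdBlockDevice() machinev1alpha1.AdditionalBlockDevice {
	return machinev1alpha1.AdditionalBlockDevice{
		Name:    "etcd",
		SizeGiB: 10,
		Storage: machinev1alpha1.BlockDeviceStorage{
			// Type is the union discriminator: "Volume" requires the volume
			// member, while "Local" would leave it unset.
			Type: machinev1alpha1.VolumeBlockDevice,
			Volume: &machinev1alpha1.BlockDeviceVolume{
				AvailabilityZone: "nova",
			},
		},
	}
}

func main() {
	dev := etcdBlockDevice()
	fmt.Printf("%s: %d GiB (%s)\n", dev.Name, dev.SizeGiB, dev.Storage.Type)
}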
+func (in *AdditionalBlockDevice) DeepCopyInto(out *AdditionalBlockDevice) { + *out = *in + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalBlockDevice. +func (in *AdditionalBlockDevice) DeepCopy() *AdditionalBlockDevice { + if in == nil { + return nil + } + out := new(AdditionalBlockDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddressPair) DeepCopyInto(out *AddressPair) { *out = *in @@ -26,6 +43,43 @@ func (in *AddressPair) DeepCopy() *AddressPair { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceStorage) DeepCopyInto(out *BlockDeviceStorage) { + *out = *in + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(BlockDeviceVolume) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceStorage. +func (in *BlockDeviceStorage) DeepCopy() *BlockDeviceStorage { + if in == nil { + return nil + } + out := new(BlockDeviceStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceVolume) DeepCopyInto(out *BlockDeviceVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceVolume. +func (in *BlockDeviceVolume) DeepCopy() *BlockDeviceVolume { + if in == nil { + return nil + } + out := new(BlockDeviceVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Filter) DeepCopyInto(out *Filter) { *out = *in @@ -165,6 +219,13 @@ func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { *out = new(RootVolume) **out = **in } + if in.AdditionalBlockDevices != nil { + in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices + *out = make([]AdditionalBlockDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go index 3ea9595d26..c8094eb269 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,37 @@ package v1alpha1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AdditionalBlockDevice = map[string]string{ + "": "additionalBlockDevice is a block device to attach to the server.", + "name": "name of the block device in the context of a machine. If the block device is a volume, the Cinder volume will be named as a combination of the machine name and this name. Also, this name will be used for tagging the block device. 
Information about the block device tag can be obtained from the OpenStack metadata API or the config drive.", + "sizeGiB": "sizeGiB is the size of the block device in gibibytes (GiB).", + "storage": "storage specifies the storage type of the block device and additional storage options.", +} + +func (AdditionalBlockDevice) SwaggerDoc() map[string]string { + return map_AdditionalBlockDevice +} + +var map_BlockDeviceStorage = map[string]string{ + "": "blockDeviceStorage is the storage type of a block device to create and contains additional storage options.", + "type": "type is the type of block device to create. This can be either \"Volume\" or \"Local\".", + "volume": "volume contains additional storage options for a volume block device.", +} + +func (BlockDeviceStorage) SwaggerDoc() map[string]string { + return map_BlockDeviceStorage +} + +var map_BlockDeviceVolume = map[string]string{ + "": "blockDeviceVolume contains additional storage options for a volume block device.", + "type": "type is the Cinder volume type of the volume. If omitted, the default Cinder volume type that is configured in the OpenStack cloud will be used.", + "availabilityZone": "availabilityZone is the volume availability zone to create the volume in. If omitted, the availability zone of the server will be used. The availability zone must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information.", +} + +func (BlockDeviceVolume) SwaggerDoc() map[string]string { + return map_BlockDeviceVolume +} + var map_Filter = map[string]string{ "id": "Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set.", "name": "name filters networks by name.", @@ -60,28 +91,29 @@ func (NetworkParam) SwaggerDoc() map[string]string { } var map_OpenstackProviderSpec = map[string]string{ - "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "cloudsSecret": "The name of the secret containing the openstack credentials", - "cloudName": "The name of the cloud to use from the clouds secret", - "flavor": "The flavor reference for the flavor for your server instance.", - "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", - "keyName": "The ssh key to inject in the instance", - "sshUserName": "The machine ssh username", - "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", - "ports": "Create and assign additional ports to instances", - "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. 
Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", - "availabilityZone": "The availability zone from which to launch the server.", - "securityGroups": "The names of the security groups to assign to the instance", - "userDataSecret": "The name of the secret containing the user data (startup script in most cases)", - "trunk": "Whether the server instance is created on a trunk port or not.", - "tags": "Machine tags Requires Nova api 2.52 minimum!", - "serverMetadata": "Metadata mapping. Allows you to create a map of key value pairs to add to the server instance.", - "configDrive": "Config Drive support", - "rootVolume": "The volume metadata to boot from", - "serverGroupID": "The server group to assign the machine to.", - "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", - "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", + "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "cloudsSecret": "The name of the secret containing the openstack credentials", + "cloudName": "The name of the cloud to use from the clouds secret", + "flavor": "The flavor reference for the flavor for your server instance.", + "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", + "keyName": "The ssh key to inject in the instance", + "sshUserName": "The machine ssh username", + "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", + "ports": "Create and assign additional ports to instances", + "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", + "availabilityZone": "The availability zone from which to launch the server.", + "securityGroups": "The names of the security groups to assign to the instance", + "userDataSecret": "The name of the secret containing the user data (startup script in most cases)", + "trunk": "Whether the server instance is created on a trunk port or not.", + "tags": "Machine tags Requires Nova api 2.52 minimum!", + "serverMetadata": "Metadata mapping. 
Allows you to create a map of key value pairs to add to the server instance.", + "configDrive": "Config Drive support", + "rootVolume": "The volume metadata to boot from", + "additionalBlockDevices": "additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance", + "serverGroupID": "The server group to assign the machine to.", + "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", + "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", } func (OpenstackProviderSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index eba56a9429..0ffe33e481 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -193,6 +193,7 @@ type ControllerConfigStatus struct { // +listType=atomic // +optional Conditions []ControllerConfigStatusCondition `json:"conditions"` + // controllerCertificates represents the latest available observations of the automatically rotating certificates in the MCO. // +listType=atomic // +optional diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..ffd221f13a --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-CustomNoUpgrade.crd.yaml @@ -0,0 +1,203 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: machineconfignodes.machineconfiguration.openshift.io + labels: + "openshift.io/operator-managed": "" + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1596 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade +spec: + # group name to use for REST API: /apis// + group: machineconfiguration.openshift.io + scope: Cluster + names: + kind: MachineConfigNode + singular: machineconfignode + plural: machineconfignodes + versions: + - name: v1alpha1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + type: string + - jsonPath: .status.conditions[?(@.type=="Resumed")].status + name: Resumed + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateCompatible")].status + name: UpdateCompatible + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="CordonedNode")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="DrainedNode")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ReloadedCRIO")].status + name: ReloadedCRIO + priority: 1 + type: string + schema: + openAPIV3Schema: + description: 'MachineConfigNode describes the health of the Machines on the system Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.' + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine config node. + type: object + required: + - configVersion + - node + - pool + properties: + configVersion: + description: configVersion holds the desired config version for the node targeted by this machine config node resource. The desired version represents the machine config the node will attempt to update to. This gets set before the machine config operator validates the new machine config against the current machine config. + type: object + required: + - desired + properties: + desired: + description: desired is the name of the machine config that the the node should be upgraded to. This value is set when the machine config pool generates a new version of its rendered configuration. When this value is changed, the machine config daemon starts the node upgrade process. This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. 
Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + node: + description: node contains a reference to the node for this machine config node. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + pool: + description: pool contains a reference to the machine config pool that this machine config node's referenced node belongs to. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + status: + description: status describes the last observed state of this machine config node. + type: object + required: + - configVersion + properties: + conditions: + description: conditions represent the observations of a machine config node's current state. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. 
Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + configVersion: + description: configVersion describes the current and desired machine config for this node. The current version represents the current machine config for the node and is updated after a successful update. The desired version represents the machine config the node will attempt to update to. This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists. + type: object + required: + - desired + properties: + current: + description: current is the name of the machine config currently in use on the node. This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + desired: + description: desired is the MachineConfig the node wants to upgrade to. This value gets set in the machine config node status once the machine config has been validated against the current machine config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + observedGeneration: + description: observedGeneration represents the generation observed by the controller. This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec. 
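Aside (not part of the patch): the MachineConfigNode schema above requires spec.node, spec.pool, and spec.configVersion.desired, and its CEL rule insists that metadata.name match spec.node.name. Since the typed Go client for this v1alpha1 resource is not shown in this diff, the sketch below builds the smallest conforming object as unstructured data; the helper name and the sample node, pool, and rendered-config names are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// minimalMachineConfigNode builds the smallest MachineConfigNode accepted by
// the schema above: metadata.name equals spec.node.name, and pool and
// configVersion.desired are set.
func minimalMachineConfigNode(node, pool, desired string) *unstructured.Unstructured {
	obj := &unstructured.Unstructured{}
	obj.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "machineconfiguration.openshift.io",
		Version: "v1alpha1",
		Kind:    "MachineConfigNode",
	})
	obj.SetName(node)
	obj.Object["spec"] = map[string]interface{}{
		"node":          map[string]interface{}{"name": node},
		"pool":          map[string]interface{}{"name": pool},
		"configVersion": map[string]interface{}{"desired": desired},
	}
	return obj
}

func main() {
	mcn := minimalMachineConfigNode("worker-0", "worker", "rendered-worker-abc")
	fmt.Println(mcn.GetName())
}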
+ type: integer + format: int64 + x-kubernetes-validations: + - rule: self.metadata.name == self.spec.node.name + message: spec.node.name should match metadata.name diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..0ed2ed8991 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/0000_80_machineconfignode-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,203 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: machineconfignodes.machineconfiguration.openshift.io + labels: + "openshift.io/operator-managed": "" + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1596 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade +spec: + # group name to use for REST API: /apis// + group: machineconfiguration.openshift.io + scope: Cluster + names: + kind: MachineConfigNode + singular: machineconfignode + plural: machineconfignodes + versions: + - name: v1alpha1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + type: string + - jsonPath: .status.conditions[?(@.type=="Resumed")].status + name: Resumed + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateCompatible")].status + name: UpdateCompatible + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="CordonedNode")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="DrainedNode")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ReloadedCRIO")].status + name: ReloadedCRIO + priority: 1 + type: string + schema: + openAPIV3Schema: + description: 'MachineConfigNode describes the health of the Machines on the system Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.' + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine config node. + type: object + required: + - configVersion + - node + - pool + properties: + configVersion: + description: configVersion holds the desired config version for the node targeted by this machine config node resource. The desired version represents the machine config the node will attempt to update to. This gets set before the machine config operator validates the new machine config against the current machine config. + type: object + required: + - desired + properties: + desired: + description: desired is the name of the machine config that the the node should be upgraded to. This value is set when the machine config pool generates a new version of its rendered configuration. When this value is changed, the machine config daemon starts the node upgrade process. This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + node: + description: node contains a reference to the node for this machine config node. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + pool: + description: pool contains a reference to the machine config pool that this machine config node's referenced node belongs to. + type: object + required: + - name + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + status: + description: status describes the last observed state of this machine config node. + type: object + required: + - configVersion + properties: + conditions: + description: conditions represent the observations of a machine config node's current state. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + configVersion: + description: configVersion describes the current and desired machine config for this node. The current version represents the current machine config for the node and is updated after a successful update. The desired version represents the machine config the node will attempt to update to. This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists. + type: object + required: + - desired + properties: + current: + description: current is the name of the machine config currently in use on the node. This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. 
+ type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + desired: + description: desired is the MachineConfig the node wants to upgrade to. This value gets set in the machine config node status once the machine config has been validated against the current machine config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + observedGeneration: + description: observedGeneration represents the generation observed by the controller. This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec. + type: integer + format: int64 + x-kubernetes-validations: + - rule: self.metadata.name == self.spec.node.name + message: spec.node.name should match metadata.name diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/Makefile b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/Makefile new file mode 100644 index 0000000000..5943b2583a --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="machineconfiguration.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/custom.machineconfignode.testsuite.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/custom.machineconfignode.testsuite.yaml new file mode 100644 index 0000000000..3b12d072af --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/custom.machineconfignode.testsuite.yaml @@ -0,0 +1,80 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Custom] MachineConfigNode" +crd: 0000_80_machineconfignode-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal MachineConfigNode + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: foobar + pool: + name: worker + configVersion: + desired: rendered-worker-abc + expected: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: foobar + pool: + name: worker + configVersion: + desired: rendered-worker-abc + - name: Node name must match the Object name. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: barfoo + pool: + name: worker + configVersion: + desired: rendered-worker-abc + expectedError: "Invalid value: \"object\": spec.node.name should match metadata.name" + - name: Pool is required. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: barfoo + configVersion: + desired: rendered-worker-abc + expectedError: "spec.pool: Required value, : Invalid value: \"null\"" + - name: Node is required. 
+ initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + pool: + name: barfoo + configVersion: + desired: rendered-worker-abc + expectedError: "spec.node: Required value, : Invalid value: \"null\"" + - name: ConfigVersion is required. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + pool: + name: barfoo + node: + name: foobar + expectedError: "spec.configVersion: Required value, : Invalid value: \"null\"" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/doc.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/doc.go new file mode 100644 index 0000000000..5876803877 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +groupName=machineconfiguration.openshift.io +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go new file mode 100644 index 0000000000..b18dfac66e --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go @@ -0,0 +1,43 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machineconfiguration.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &MachineConfigNode{}, + &MachineConfigNodeList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Kind is used to validate existence of a resource kind in this API group +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/techpreview.machineconfignode.testsuite.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/techpreview.machineconfignode.testsuite.yaml new file mode 100644 index 0000000000..eec4192e2e --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/techpreview.machineconfignode.testsuite.yaml @@ -0,0 +1,80 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] MachineConfigNode" +crd: 0000_80_machineconfignode-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal MachineConfigNode + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: foobar + pool: + name: worker + configVersion: + desired: rendered-worker-abc + expected: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: foobar + pool: + name: worker + configVersion: + desired: rendered-worker-abc + - name: Node name must match the Object name. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: barfoo + pool: + name: worker + configVersion: + desired: rendered-worker-abc + expectedError: "Invalid value: \"object\": spec.node.name should match metadata.name" + - name: Pool is required. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + node: + name: barfoo + configVersion: + desired: rendered-worker-abc + expectedError: "spec.pool: Required value, : Invalid value: \"null\"" + - name: Node is required. + initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + pool: + name: barfoo + configVersion: + desired: rendered-worker-abc + expectedError: "spec.node: Required value, : Invalid value: \"null\"" + - name: ConfigVersion is required. 
+ initial: | + apiVersion: machineconfiguration.openshift.io/v1alpha1 + kind: MachineConfigNode + metadata: + name: foobar + spec: + pool: + name: barfoo + node: + name: foobar + expectedError: "spec.configVersion: Required value, : Invalid value: \"null\"" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go new file mode 100644 index 0000000000..06cddaaf63 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go @@ -0,0 +1,168 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineConfigNode describes the health of the Machines on the system +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == self.spec.node.name",message="spec.node.name should match metadata.name" +type MachineConfigNode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the configuration of the machine config node. + // +kubebuilder:validation:Required + Spec MachineConfigNodeSpec `json:"spec"` + + // status describes the last observed state of this machine config node. + // +optional + Status MachineConfigNodeStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineConfigNodeList describes all of the MachinesStates on the system +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type MachineConfigNodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []MachineConfigNode `json:"items"` +} + +// MCOObjectReference holds information about an object the MCO either owns +// or modifies in some way +type MCOObjectReference struct { + // name is the object name. + // Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // It may consist of only alphanumeric characters, hyphens (-) and periods (.) + // and must be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + // +kubebuilder:validation:Required + Name string `json:"name"` +} + +// MachineConfigNodeSpec describes the MachineConfigNode we are managing. +type MachineConfigNodeSpec struct { + // node contains a reference to the node for this machine config node. + // +kubebuilder:validation:Required + Node MCOObjectReference `json:"node"` + + // pool contains a reference to the machine config pool that this machine config node's + // referenced node belongs to. + // +kubebuilder:validation:Required + Pool MCOObjectReference `json:"pool"` + + // configVersion holds the desired config version for the node targeted by this machine config node resource. + // The desired version represents the machine config the node will attempt to update to. 
This gets set before the machine config operator validates + // the new machine config against the current machine config. + // +kubebuilder:validation:Required + ConfigVersion MachineConfigNodeSpecMachineConfigVersion `json:"configVersion"` +} + +// MachineConfigNodeStatus holds the reported information on a particular machine config node. +type MachineConfigNodeStatus struct { + // conditions represent the observations of a machine config node's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // observedGeneration represents the generation observed by the controller. + // This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec. + // +kubebuilder:validation:Required + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // configVersion describes the current and desired machine config for this node. + // The current version represents the current machine config for the node and is updated after a successful update. + // The desired version represents the machine config the node will attempt to update to. + // This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists. + // +kubebuilder:validation:Required + ConfigVersion MachineConfigNodeStatusMachineConfigVersion `json:"configVersion"` +} + +// MachineConfigNodeStatusMachineConfigVersion holds the current and desired config versions as last updated in the MCN status. +// When the current and desired versions are not matched, the machine config pool is processing an upgrade and the machine config node will +// monitor the upgrade process. +// When the current and desired versions match, +// the machine config node will ignore these events given that certain operations happen both during the MCO's upgrade mode and the daily operations mode. +type MachineConfigNodeStatusMachineConfigVersion struct { + // current is the name of the machine config currently in use on the node. + // This value is updated once the machine config daemon has completed the update of the configuration for the node. + // This value should match the desired version unless an upgrade is in progress. + // Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // It may consist of only alphanumeric characters, hyphens (-) and periods (.) + // and must be at most 253 characters in length. + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + // +optional + Current string `json:"current"` + // desired is the MachineConfig the node wants to upgrade to. + // This value gets set in the machine config node status once the machine config has been validated + // against the current machine config. + // Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // It may consist of only alphanumeric characters, hyphens (-) and periods (.) + // and must be at most 253 characters in length.
+ // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + // +kubebuilder:validation:Required + Desired string `json:"desired"` +} + +// MachineConfigNodeSpecMachineConfigVersion holds the desired config version for the current observed machine config node. +// When Current is not equal to Desired, the MachineConfigOperator is in an upgrade phase and the machine config node will +// take account of upgrade related events. Otherwise they will be ignored given that certain operations +// happen both during the MCO's upgrade mode and the daily operations mode. +type MachineConfigNodeSpecMachineConfigVersion struct { + // desired is the name of the machine config that the node should be upgraded to. + // This value is set when the machine config pool generates a new version of its rendered configuration. + // When this value is changed, the machine config daemon starts the node upgrade process. + // This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. + // Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // It may consist of only alphanumeric characters, hyphens (-) and periods (.) + // and must be at most 253 characters in length. + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + // +kubebuilder:validation:Required + Desired string `json:"desired"` +} + +// StateProgress is each possible state for each possible MachineConfigNodeType +// UpgradeProgression Kind will only use the "MachineConfigPoolUpdate..." types for example +// Please note: These conditions are subject to change. Both additions and deletions may be made. +type StateProgress string + +const ( + // MachineConfigNodeUpdatePrepared describes a machine that is preparing in the daemon to trigger an update + MachineConfigNodeUpdatePrepared StateProgress = "UpdatePrepared" + // MachineConfigNodeUpdateExecuted describes a machine that has executed the body of the upgrade + MachineConfigNodeUpdateExecuted StateProgress = "UpdateExecuted" + // MachineConfigNodeUpdatePostActionComplete describes a machine that has executed its post update action + MachineConfigNodeUpdatePostActionComplete StateProgress = "UpdatePostActionComplete" + // MachineConfigNodeUpdateComplete describes a machine that has completed the core parts of an upgrade.
+ MachineConfigNodeUpdateComplete StateProgress = "UpdateComplete" + // MachineConfigNodeUpdated describes a machine that has a matching desired and current config after executing an update + MachineConfigNodeUpdated StateProgress = "Updated" + // MachineConfigNodeResumed describes a machine that has resumed normal processes + MachineConfigNodeResumed StateProgress = "Resumed" + // MachineConfigNodeUpdateCompatible describes the part of the preparing phase where the MCO decides whether it can update + MachineConfigNodeUpdateCompatible StateProgress = "UpdateCompatible" + // MachineConfigNodeUpdateDrained describes the part of the in-progress phase where the node drains + MachineConfigNodeUpdateDrained StateProgress = "Drained" + // MachineConfigNodeUpdateFilesAndOS describes the part of the in-progress phase where the node's files and OS config change + MachineConfigNodeUpdateFilesAndOS StateProgress = "AppliedFilesAndOS" + // MachineConfigNodeUpdateCordoned describes the part of the completing phase where the node cordons + MachineConfigNodeUpdateCordoned StateProgress = "Cordoned" + // MachineConfigNodeUpdateRebooted describes the part of the post action phase where the node reboots itself + MachineConfigNodeUpdateRebooted StateProgress = "RebootedNode" + // MachineConfigNodeUpdateReloaded describes the part of the post action phase where the node reloads its CRIO service + MachineConfigNodeUpdateReloaded StateProgress = "ReloadedCRIO" +) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d2ab9b91af --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,163 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCOObjectReference) DeepCopyInto(out *MCOObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCOObjectReference. +func (in *MCOObjectReference) DeepCopy() *MCOObjectReference { + if in == nil { + return nil + } + out := new(MCOObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNode) DeepCopyInto(out *MachineConfigNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNode. +func (in *MachineConfigNode) DeepCopy() *MachineConfigNode { + if in == nil { + return nil + } + out := new(MachineConfigNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineConfigNodeList) DeepCopyInto(out *MachineConfigNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineConfigNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeList. +func (in *MachineConfigNodeList) DeepCopy() *MachineConfigNodeList { + if in == nil { + return nil + } + out := new(MachineConfigNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeSpec) DeepCopyInto(out *MachineConfigNodeSpec) { + *out = *in + out.Node = in.Node + out.Pool = in.Pool + out.ConfigVersion = in.ConfigVersion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeSpec. +func (in *MachineConfigNodeSpec) DeepCopy() *MachineConfigNodeSpec { + if in == nil { + return nil + } + out := new(MachineConfigNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeSpecMachineConfigVersion) DeepCopyInto(out *MachineConfigNodeSpecMachineConfigVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeSpecMachineConfigVersion. +func (in *MachineConfigNodeSpecMachineConfigVersion) DeepCopy() *MachineConfigNodeSpecMachineConfigVersion { + if in == nil { + return nil + } + out := new(MachineConfigNodeSpecMachineConfigVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatus) DeepCopyInto(out *MachineConfigNodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.ConfigVersion = in.ConfigVersion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatus. +func (in *MachineConfigNodeStatus) DeepCopy() *MachineConfigNodeStatus { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatusMachineConfigVersion) DeepCopyInto(out *MachineConfigNodeStatusMachineConfigVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatusMachineConfigVersion. 
+func (in *MachineConfigNodeStatusMachineConfigVersion) DeepCopy() *MachineConfigNodeStatusMachineConfigVersion { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatusMachineConfigVersion) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..0044c9c4b6 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,82 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_MCOObjectReference = map[string]string{ + "": "MCOObjectReference holds information about an object the MCO either owns or modifies in some way", + "name": "name is the object name. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length.", +} + +func (MCOObjectReference) SwaggerDoc() map[string]string { + return map_MCOObjectReference +} + +var map_MachineConfigNode = map[string]string{ + "": "MachineConfigNode describes the health of the Machines on the system Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "spec": "spec describes the configuration of the machine config node.", + "status": "status describes the last observed state of this machine config node.", +} + +func (MachineConfigNode) SwaggerDoc() map[string]string { + return map_MachineConfigNode +} + +var map_MachineConfigNodeList = map[string]string{ + "": "MachineConfigNodeList describes all of the MachinesStates on the system\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (MachineConfigNodeList) SwaggerDoc() map[string]string { + return map_MachineConfigNodeList +} + +var map_MachineConfigNodeSpec = map[string]string{ + "": "MachineConfigNodeSpec describes the MachineConfigNode we are managing.", + "node": "node contains a reference to the node for this machine config node.", + "pool": "pool contains a reference to the machine config pool that this machine config node's referenced node belongs to.", + "configVersion": "configVersion holds the desired config version for the node targeted by this machine config node resource. The desired version represents the machine config the node will attempt to update to. 
This gets set before the machine config operator validates the new machine config against the current machine config.", +} + +func (MachineConfigNodeSpec) SwaggerDoc() map[string]string { + return map_MachineConfigNodeSpec +} + +var map_MachineConfigNodeSpecMachineConfigVersion = map[string]string{ + "": "MachineConfigNodeSpecMachineConfigVersion holds the desired config version for the current observed machine config node. When Current is not equal to Desired; the MachineConfigOperator is in an upgrade phase and the machine config node will take account of upgrade related events. Otherwise they will be ignored given that certain operations happen both during the MCO's upgrade mode and the daily operations mode.", + "desired": "desired is the name of the machine config that the the node should be upgraded to. This value is set when the machine config pool generates a new version of its rendered configuration. When this value is changed, the machine config daemon starts the node upgrade process. This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length.", +} + +func (MachineConfigNodeSpecMachineConfigVersion) SwaggerDoc() map[string]string { + return map_MachineConfigNodeSpecMachineConfigVersion +} + +var map_MachineConfigNodeStatus = map[string]string{ + "": "MachineConfigNodeStatus holds the reported information on a particular machine config node.", + "conditions": "conditions represent the observations of a machine config node's current state.", + "observedGeneration": "observedGeneration represents the generation observed by the controller. This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec.", + "configVersion": "configVersion describes the current and desired machine config for this node. The current version represents the current machine config for the node and is updated after a successful update. The desired version represents the machine config the node will attempt to update to. This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists.", +} + +func (MachineConfigNodeStatus) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatus +} + +var map_MachineConfigNodeStatusMachineConfigVersion = map[string]string{ + "": "MachineConfigNodeStatusMachineConfigVersion holds the current and desired config versions as last updated in the MCN status. When the current and desired versions are not matched, the machine config pool is processing an upgrade and the machine config node will monitor the upgrade process. When the current and desired versions do not match, the machine config node will ignore these events given that certain operations happen both during the MCO's upgrade mode and the daily operations mode.", + "current": "current is the name of the machine config currently in use on the node. This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. 
Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length.", + "desired": "desired is the MachineConfig the node wants to upgrade to. This value gets set in the machine config node status once the machine config has been validated against the current machine config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) It may consist of only alphanumeric characters, hyphens (-) and periods (.) and must be at most 253 characters in length.", +} + +func (MachineConfigNodeStatusMachineConfigVersion) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatusMachineConfigVersion +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/network/.codegen.yaml b/vendor/github.com/openshift/api/network/.codegen.yaml new file mode 100644 index 0000000000..55f3a272c8 --- /dev/null +++ b/vendor/github.com/openshift/api/network/.codegen.yaml @@ -0,0 +1,6 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" + - "CustomNoUpgrade" diff --git a/vendor/github.com/openshift/api/network/install.go b/vendor/github.com/openshift/api/network/install.go index 85bc706236..fbaa079b3f 100644 --- a/vendor/github.com/openshift/api/network/install.go +++ b/vendor/github.com/openshift/api/network/install.go @@ -5,6 +5,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" networkv1 "github.com/openshift/api/network/v1" + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" ) const ( @@ -12,7 +13,7 @@ const ( ) var ( - schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install) + schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install, networkv1alpha1.Install) // Install is a function which adds every version of this group to a scheme Install = schemeBuilder.AddToScheme ) diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml new file mode 100644 index 0000000000..19ad00b875 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml @@ -0,0 +1,154 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1524 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: dnsnameresolvers.network.openshift.io +spec: + group: network.openshift.io + names: + kind: DNSNameResolver + listKind: DNSNameResolverList + plural: dnsnameresolvers + singular: dnsnameresolver + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNSNameResolver. + properties: + name: + description: name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + x-kubernetes-validations: + - message: spec.name is immutable + rule: self == oldSelf + required: + - name + type: object + status: + description: status is the most recently observed status of the DNSNameResolver. + properties: + resolvedNames: + description: resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times. + items: + description: DNSNameResolverResolvedName describes the details of a resolved DNS name. + properties: + conditions: + description: 'conditions provide information about the state of the DNS name. Known .status.conditions.type is: "Degraded". "Degraded" is true when the last resolution failed for the DNS name, and false otherwise.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + dnsName: + description: dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well. + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + resolutionFailures: + description: resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired. + format: int32 + type: integer + resolvedAddresses: + description: resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName. + items: + description: DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. + properties: + ip: + anyOf: + - format: ipv4 + - format: ipv6 + description: ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. 
+ type: string + lastLookupTime: + description: lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: date-time + type: string + ttlSeconds: + description: ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: int32 + type: integer + required: + - ip + - lastLookupTime + - ttlSeconds + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + required: + - dnsName + - resolvedAddresses + type: object + type: array + x-kubernetes-list-map-keys: + - dnsName + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch new file mode 100644 index 0000000000..975ae7c93f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch @@ -0,0 +1,5 @@ +- op: add + path: /spec/versions/name=v1alpha1/schema/openAPIV3Schema/properties/status/properties/resolvedNames/items/properties/resolvedAddresses/items/properties/ip/anyOf + value: + - format: ipv4 + - format: ipv6 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml new file mode 100644 index 0000000000..e4c3c25412 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml @@ -0,0 +1,154 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1524 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: dnsnameresolvers.network.openshift.io +spec: + group: network.openshift.io + names: + kind: DNSNameResolver + listKind: DNSNameResolverList + plural: dnsnameresolvers + singular: dnsnameresolver + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNSNameResolver. + properties: + name: + description: name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + x-kubernetes-validations: + - message: spec.name is immutable + rule: self == oldSelf + required: + - name + type: object + status: + description: status is the most recently observed status of the DNSNameResolver. + properties: + resolvedNames: + description: resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times. + items: + description: DNSNameResolverResolvedName describes the details of a resolved DNS name. + properties: + conditions: + description: 'conditions provide information about the state of the DNS name. Known .status.conditions.type is: "Degraded". "Degraded" is true when the last resolution failed for the DNS name, and false otherwise.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + dnsName: + description: dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well. + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + resolutionFailures: + description: resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired. + format: int32 + type: integer + resolvedAddresses: + description: resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName. + items: + description: DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. + properties: + ip: + anyOf: + - format: ipv4 + - format: ipv6 + description: ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. 
+ type: string + lastLookupTime: + description: lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: date-time + type: string + ttlSeconds: + description: ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: int32 + type: integer + required: + - ip + - lastLookupTime + - ttlSeconds + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + required: + - dnsName + - resolvedAddresses + type: object + type: array + x-kubernetes-list-map-keys: + - dnsName + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch new file mode 100644 index 0000000000..975ae7c93f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch @@ -0,0 +1,5 @@ +- op: add + path: /spec/versions/name=v1alpha1/schema/openAPIV3Schema/properties/status/properties/resolvedNames/items/properties/resolvedAddresses/items/properties/ip/anyOf + value: + - format: ipv4 + - format: ipv6 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/Makefile b/vendor/github.com/openshift/api/network/v1alpha1/Makefile new file mode 100644 index 0000000000..376fee2dc0 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml b/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml new file mode 100644 index 0000000000..24175b6d73 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] DNSNameResolver" +crd: 0000_70_dnsnameresolver_00-techpreview.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNSNameResolver with a regular DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + - name: Should be able to create a minimal DNSNameResolver with a wildcard DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." 
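(Illustrative aside, not part of the vendored test suite.) The cases above and below all exercise the spec.name pattern from the DNSNameResolver CRD schema. Below is a minimal Go sketch, assuming only the standard library, that checks a few names against that same regexp; the regexp literal is copied from the CRD, the sample names mirror the surrounding test cases, and the maxLength 254 constraint is not checked here.

package main

import (
	"fmt"
	"regexp"
)

// dnsNamePattern is the spec.name validation pattern from the DNSNameResolver CRD schema.
var dnsNamePattern = regexp.MustCompile(`^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$`)

func main() {
	names := []string{
		"www.example.com.", // regular DNS name: matches
		"*.example.com.",   // wildcard DNS name: matches
		"www_example_com",  // underscores, no trailing period: does not match
		"*.com.",           // wildcard directly before the TLD: does not match
	}
	for _, name := range names {
		fmt.Printf("%-20s matches=%v\n", name, dnsNamePattern.MatchString(name))
	}
}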
+ - name: Should be able to specify DNS name with a '-' in a label + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + - name: Should not be able to specify invalid DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www_example_com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www_example_com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label starting with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: -example.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"-example.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label ending with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: example-.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"example-.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name without a trailing period + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www.example.com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify just the TLD in a DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a wildcard before TLD in DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: "*.com." + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"*.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing uppercase letters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: ABCD.com. 
+ expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"ABCD.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing more than 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should be able to specify a DNS name with a label containing 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + onUpdate: + - name: Should not be able to update spec.name field + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.newexample.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"string\": spec.name is immutable" + - name: Should be able to add valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. 
+ updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "256.256.256.256" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"256.256.256.256\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"256.256.256.256\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" + - name: Should be able to add valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. 
+ updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "10000:10000:10000:10000:10000:10000:10000:10000" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"10000:10000:10000:10000:10000:10000:10000:10000\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"10000:10000:10000:10000:10000:10000:10000:10000\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/doc.go b/vendor/github.com/openshift/api/network/v1alpha1/doc.go new file mode 100644 index 0000000000..35539c458c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/register.go b/vendor/github.com/openshift/api/network/v1alpha1/register.go new file mode 100644 index 0000000000..6d80c234ba --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + 
metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &DNSNameResolver{}, + &DNSNameResolverList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml b/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml new file mode 100644 index 0000000000..411e5ffcdc --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] DNSNameResolver" +crd: 0000_70_dnsnameresolver_00-techpreview.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNSNameResolver + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + - name: Should be able to create a minimal DNSNameResolver with a wildcard DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + - name: Should be able to specify DNS name with a '-' in a label + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + - name: Should not be able to specify invalid DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www_example_com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www_example_com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label starting with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: -example.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"-example.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label ending with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: example-.com. 
+ expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"example-.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name without a trailing period + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www.example.com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify just the TLD in a DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a wildcard before TLD in DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: "*.com." + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"*.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing uppercase letters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: ABCD.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"ABCD.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing more than 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should be able to specify a DNS name with a label containing 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + onUpdate: + - name: Should not be able to update spec.name field + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.newexample.com. 
+ expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"string\": spec.name is immutable" + - name: Should be able to add valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "256.256.256.256" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"256.256.256.256\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"256.256.256.256\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" + - name: Should be able to add valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "10000:10000:10000:10000:10000:10000:10000:10000" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"10000:10000:10000:10000:10000:10000:10000:10000\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"10000:10000:10000:10000:10000:10000:10000:10000\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go new file mode 100644 index 0000000000..4e0199d7e7 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go @@ -0,0 +1,139 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +openshift:compatibility-gen:level=4 + +// DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. +// It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolver struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNSNameResolver. + // +kubebuilder:validation:Required + Spec DNSNameResolverSpec `json:"spec"` + // status is the most recently observed status of the DNSNameResolver. + // +optional + Status DNSNameResolverStatus `json:"status,omitempty"` +} + +// DNSName is used for validation of a DNS name. +// +kubebuilder:validation:Pattern=`^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$` +// +kubebuilder:validation:MaxLength=254 +type DNSName string + +// DNSNameResolverSpec is a desired state description of DNSNameResolver. +type DNSNameResolverSpec struct { + // name is the DNS name for which the DNS name resolution information will be stored. + // For a regular DNS name, only the DNS name resolution information of the regular DNS + // name will be stored. For a wildcard DNS name, the DNS name resolution information + // of all the DNS names that match the wildcard DNS name will be stored. + // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single + // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' + // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable" + Name DNSName `json:"name"` +} + +// DNSNameResolverStatus defines the observed status of DNSNameResolver. 
+type DNSNameResolverStatus struct { + // resolvedNames contains a list of matching DNS names and their corresponding IP addresses + // along with their TTL and last DNS lookup times. + // +listType=map + // +listMapKey=dnsName + // +patchMergeKey=dnsName + // +patchStrategy=merge + // +optional + ResolvedNames []DNSNameResolverResolvedName `json:"resolvedNames,omitempty" patchStrategy:"merge" patchMergeKey:"dnsName"` +} + +// DNSNameResolverResolvedName describes the details of a resolved DNS name. +type DNSNameResolverResolvedName struct { + // conditions provide information about the state of the DNS name. + // Known .status.conditions.type is: "Degraded". + // "Degraded" is true when the last resolution failed for the DNS name, + // and false otherwise. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can + // store both regular and wildcard DNS names which match the spec.name field. When the spec.name + // field contains a regular DNS name, this field will store the same regular DNS name after it is + // successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName + // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. + // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard + // DNS name as well. + // +kubebuilder:validation:Required + DNSName DNSName `json:"dnsName"` + + // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last + // lookup times for the dnsName. + // +kubebuilder:validation:Required + // +listType=map + // +listMapKey=ip + ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"` + + // resolutionFailures keeps the count of how many consecutive times the DNS resolution failed + // for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon + // every failure, the value of the field will be incremented by one. The details about the DNS + // name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the + // associated IP addresses have expired. + ResolutionFailures int32 `json:"resolutionFailures,omitempty"` +} + +// DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. +type DNSNameResolverResolvedAddress struct { + // ip is an IP address associated with the dnsName. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon + // the expiration of the IP address's validity. If the information is not refreshed then it will + // be removed with a grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + IP string `json:"ip"` + + // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with + // the current time-to-live value. If the information is not refreshed then it will be removed with a + // grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + TTLSeconds int32 `json:"ttlSeconds"` + + // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. 
The validity of + // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to + // the current time on a successful DNS lookup. If the information is not refreshed then it will be + // removed with a grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + LastLookupTime *metav1.Time `json:"lastLookupTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +openshift:compatibility-gen:level=4 + +// DNSNameResolverList contains a list of DNSNameResolvers. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolverList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items gives the list of DNSNameResolvers. + Items []DNSNameResolver `json:"items"` +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b8308c3f83 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,161 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolver) DeepCopyInto(out *DNSNameResolver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolver. +func (in *DNSNameResolver) DeepCopy() *DNSNameResolver { + if in == nil { + return nil + } + out := new(DNSNameResolver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSNameResolver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverList) DeepCopyInto(out *DNSNameResolverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSNameResolver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverList. +func (in *DNSNameResolverList) DeepCopy() *DNSNameResolverList { + if in == nil { + return nil + } + out := new(DNSNameResolverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DNSNameResolverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverResolvedAddress) DeepCopyInto(out *DNSNameResolverResolvedAddress) { + *out = *in + if in.LastLookupTime != nil { + in, out := &in.LastLookupTime, &out.LastLookupTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedAddress. +func (in *DNSNameResolverResolvedAddress) DeepCopy() *DNSNameResolverResolvedAddress { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverResolvedName) DeepCopyInto(out *DNSNameResolverResolvedName) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResolvedAddresses != nil { + in, out := &in.ResolvedAddresses, &out.ResolvedAddresses + *out = make([]DNSNameResolverResolvedAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedName. +func (in *DNSNameResolverResolvedName) DeepCopy() *DNSNameResolverResolvedName { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverSpec) DeepCopyInto(out *DNSNameResolverSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverSpec. +func (in *DNSNameResolverSpec) DeepCopy() *DNSNameResolverSpec { + if in == nil { + return nil + } + out := new(DNSNameResolverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverStatus) DeepCopyInto(out *DNSNameResolverStatus) { + *out = *in + if in.ResolvedNames != nil { + in, out := &in.ResolvedNames, &out.ResolvedNames + *out = make([]DNSNameResolverResolvedName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverStatus. +func (in *DNSNameResolverStatus) DeepCopy() *DNSNameResolverStatus { + if in == nil { + return nil + } + out := new(DNSNameResolverStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..e5018a9736 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,76 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DNSNameResolver = map[string]string{ + "": "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNSNameResolver.", + "status": "status is the most recently observed status of the DNSNameResolver.", +} + +func (DNSNameResolver) SwaggerDoc() map[string]string { + return map_DNSNameResolver +} + +var map_DNSNameResolverList = map[string]string{ + "": "DNSNameResolverList contains a list of DNSNameResolvers.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items gives the list of DNSNameResolvers.", +} + +func (DNSNameResolverList) SwaggerDoc() map[string]string { + return map_DNSNameResolverList +} + +var map_DNSNameResolverResolvedAddress = map[string]string{ + "": "DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name.", + "ip": "ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "ttlSeconds": "ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "lastLookupTime": "lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. 
If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", +} + +func (DNSNameResolverResolvedAddress) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedAddress +} + +var map_DNSNameResolverResolvedName = map[string]string{ + "": "DNSNameResolverResolvedName describes the details of a resolved DNS name.", + "conditions": "conditions provide information about the state of the DNS name. Known .status.conditions.type is: \"Degraded\". \"Degraded\" is true when the last resolution failed for the DNS name, and false otherwise.", + "dnsName": "dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well.", + "resolvedAddresses": "resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName.", + "resolutionFailures": "resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired.", +} + +func (DNSNameResolverResolvedName) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedName +} + +var map_DNSNameResolverSpec = map[string]string{ + "": "DNSNameResolverSpec is a desired state description of DNSNameResolver.", + "name": "name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' 
but won't match 'sub2.sub1.example.com.'", +} + +func (DNSNameResolverSpec) SwaggerDoc() map[string]string { + return map_DNSNameResolverSpec +} + +var map_DNSNameResolverStatus = map[string]string{ + "": "DNSNameResolverStatus defines the observed status of DNSNameResolver.", + "resolvedNames": "resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times.", +} + +func (DNSNameResolverStatus) SwaggerDoc() map[string]string { + return map_DNSNameResolverStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 05f0d795de..be364a5e37 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -200,19 +200,21 @@ const ( OpenShiftServiceAccountController OpenShiftControllerName = "openshift.io/serviceaccount" OpenShiftDefaultRoleBindingsController OpenShiftControllerName = "openshift.io/default-rolebindings" OpenShiftServiceAccountPullSecretsController OpenShiftControllerName = "openshift.io/serviceaccount-pull-secrets" - OpenshiftOriginNamespaceController OpenShiftControllerName = "openshift.io/origin-namespace" - OpenshiftBuildController OpenShiftControllerName = "openshift.io/build" - OpenshiftBuildConfigChangeController OpenShiftControllerName = "openshift.io/build-config-change" + OpenShiftOriginNamespaceController OpenShiftControllerName = "openshift.io/origin-namespace" + OpenShiftBuildController OpenShiftControllerName = "openshift.io/build" + OpenShiftBuildConfigChangeController OpenShiftControllerName = "openshift.io/build-config-change" OpenShiftBuilderServiceAccountController OpenShiftControllerName = "openshift.io/builder-serviceaccount" - OpenshiftDeployerController OpenShiftControllerName = "openshift.io/deployer" + OpenShiftDeployerController OpenShiftControllerName = "openshift.io/deployer" OpenShiftDeployerServiceAccountController OpenShiftControllerName = "openshift.io/deployer-serviceaccount" - OpenshiftDeploymentConfigController OpenShiftControllerName = "openshift.io/deploymentconfig" - OpenshiftImageTriggerController OpenShiftControllerName = "openshift.io/image-trigger" - OpenshiftImageImportController OpenShiftControllerName = "openshift.io/image-import" - OpenshiftImageSignatureImportController OpenShiftControllerName = "openshift.io/image-signature-import" - OpenshiftTemplateInstanceController OpenShiftControllerName = "openshift.io/templateinstance" - OpenshiftTemplateInstanceFinalizerController OpenShiftControllerName = "openshift.io/templateinstancefinalizer" - OpenshiftUnidlingController OpenShiftControllerName = "openshift.io/unidling" + OpenShiftDeploymentConfigController OpenShiftControllerName = "openshift.io/deploymentconfig" + OpenShiftImageTriggerController OpenShiftControllerName = "openshift.io/image-trigger" + OpenShiftImageImportController OpenShiftControllerName = "openshift.io/image-import" + OpenShiftImageSignatureImportController OpenShiftControllerName = "openshift.io/image-signature-import" + OpenShiftTemplateInstanceController OpenShiftControllerName = "openshift.io/templateinstance" + OpenShiftTemplateInstanceFinalizerController OpenShiftControllerName = "openshift.io/templateinstancefinalizer" + OpenShiftUnidlingController OpenShiftControllerName = "openshift.io/unidling" + OpenShiftIngressIPController OpenShiftControllerName = 
"openshift.io/ingress-ip" + OpenShiftIngressToRouteController OpenShiftControllerName = "openshift.io/ingress-to-route" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go index 56ebbfc013..f152d261a8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go @@ -14,6 +14,7 @@ type AuthenticationSpecApplyConfiguration struct { WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticators,omitempty"` WebhookTokenAuthenticator *WebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticator,omitempty"` ServiceAccountIssuer *string `json:"serviceAccountIssuer,omitempty"` + OIDCProviders []OIDCProviderApplyConfiguration `json:"oidcProviders,omitempty"` } // AuthenticationSpecApplyConfiguration constructs an declarative configuration of the AuthenticationSpec type for use with @@ -66,3 +67,16 @@ func (b *AuthenticationSpecApplyConfiguration) WithServiceAccountIssuer(value st b.ServiceAccountIssuer = &value return b } + +// WithOIDCProviders adds the given value to the OIDCProviders field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OIDCProviders field. +func (b *AuthenticationSpecApplyConfiguration) WithOIDCProviders(values ...*OIDCProviderApplyConfiguration) *AuthenticationSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOIDCProviders") + } + b.OIDCProviders = append(b.OIDCProviders, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go new file mode 100644 index 0000000000..c554a26624 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go @@ -0,0 +1,55 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OIDCProviderApplyConfiguration represents an declarative configuration of the OIDCProvider type for use +// with apply. +type OIDCProviderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Issuer *TokenIssuerApplyConfiguration `json:"issuer,omitempty"` + ClaimMappings *TokenClaimMappingsApplyConfiguration `json:"claimMappings,omitempty"` + ClaimValidationRules []TokenClaimValidationRuleApplyConfiguration `json:"claimValidationRules,omitempty"` +} + +// OIDCProviderApplyConfiguration constructs an declarative configuration of the OIDCProvider type for use with +// apply. +func OIDCProvider() *OIDCProviderApplyConfiguration { + return &OIDCProviderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *OIDCProviderApplyConfiguration) WithName(value string) *OIDCProviderApplyConfiguration { + b.Name = &value + return b +} + +// WithIssuer sets the Issuer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Issuer field is set to the value of the last call. +func (b *OIDCProviderApplyConfiguration) WithIssuer(value *TokenIssuerApplyConfiguration) *OIDCProviderApplyConfiguration { + b.Issuer = value + return b +} + +// WithClaimMappings sets the ClaimMappings field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClaimMappings field is set to the value of the last call. +func (b *OIDCProviderApplyConfiguration) WithClaimMappings(value *TokenClaimMappingsApplyConfiguration) *OIDCProviderApplyConfiguration { + b.ClaimMappings = value + return b +} + +// WithClaimValidationRules adds the given value to the ClaimValidationRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClaimValidationRules field. +func (b *OIDCProviderApplyConfiguration) WithClaimValidationRules(values ...*TokenClaimValidationRuleApplyConfiguration) *OIDCProviderApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithClaimValidationRules") + } + b.ClaimValidationRules = append(b.ClaimValidationRules, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go new file mode 100644 index 0000000000..fedc364e3f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrefixedClaimMappingApplyConfiguration represents an declarative configuration of the PrefixedClaimMapping type for use +// with apply. +type PrefixedClaimMappingApplyConfiguration struct { + TokenClaimMappingApplyConfiguration `json:",inline"` + Prefix *string `json:"prefix,omitempty"` +} + +// PrefixedClaimMappingApplyConfiguration constructs an declarative configuration of the PrefixedClaimMapping type for use with +// apply. +func PrefixedClaimMapping() *PrefixedClaimMappingApplyConfiguration { + return &PrefixedClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *PrefixedClaimMappingApplyConfiguration) WithClaim(value string) *PrefixedClaimMappingApplyConfiguration { + b.Claim = &value + return b +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. 
+func (b *PrefixedClaimMappingApplyConfiguration) WithPrefix(value string) *PrefixedClaimMappingApplyConfiguration { + b.Prefix = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go new file mode 100644 index 0000000000..91c29b61c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenClaimMappingApplyConfiguration represents an declarative configuration of the TokenClaimMapping type for use +// with apply. +type TokenClaimMappingApplyConfiguration struct { + Claim *string `json:"claim,omitempty"` +} + +// TokenClaimMappingApplyConfiguration constructs an declarative configuration of the TokenClaimMapping type for use with +// apply. +func TokenClaimMapping() *TokenClaimMappingApplyConfiguration { + return &TokenClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *TokenClaimMappingApplyConfiguration) WithClaim(value string) *TokenClaimMappingApplyConfiguration { + b.Claim = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go new file mode 100644 index 0000000000..1a2fdb0953 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenClaimMappingsApplyConfiguration represents an declarative configuration of the TokenClaimMappings type for use +// with apply. +type TokenClaimMappingsApplyConfiguration struct { + Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"` + Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"` +} + +// TokenClaimMappingsApplyConfiguration constructs an declarative configuration of the TokenClaimMappings type for use with +// apply. +func TokenClaimMappings() *TokenClaimMappingsApplyConfiguration { + return &TokenClaimMappingsApplyConfiguration{} +} + +// WithUsername sets the Username field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Username field is set to the value of the last call. +func (b *TokenClaimMappingsApplyConfiguration) WithUsername(value *UsernameClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + b.Username = value + return b +} + +// WithGroups sets the Groups field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Groups field is set to the value of the last call. 
+func (b *TokenClaimMappingsApplyConfiguration) WithGroups(value *PrefixedClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + b.Groups = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go new file mode 100644 index 0000000000..6793f93279 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// TokenClaimValidationRuleApplyConfiguration represents an declarative configuration of the TokenClaimValidationRule type for use +// with apply. +type TokenClaimValidationRuleApplyConfiguration struct { + Type *v1.TokenValidationRuleType `json:"type,omitempty"` + RequiredClaim *TokenRequiredClaimApplyConfiguration `json:"requiredClaim,omitempty"` +} + +// TokenClaimValidationRuleApplyConfiguration constructs an declarative configuration of the TokenClaimValidationRule type for use with +// apply. +func TokenClaimValidationRule() *TokenClaimValidationRuleApplyConfiguration { + return &TokenClaimValidationRuleApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TokenClaimValidationRuleApplyConfiguration) WithType(value v1.TokenValidationRuleType) *TokenClaimValidationRuleApplyConfiguration { + b.Type = &value + return b +} + +// WithRequiredClaim sets the RequiredClaim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequiredClaim field is set to the value of the last call. +func (b *TokenClaimValidationRuleApplyConfiguration) WithRequiredClaim(value *TokenRequiredClaimApplyConfiguration) *TokenClaimValidationRuleApplyConfiguration { + b.RequiredClaim = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go new file mode 100644 index 0000000000..808e61a1db --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// TokenIssuerApplyConfiguration represents an declarative configuration of the TokenIssuer type for use +// with apply. +type TokenIssuerApplyConfiguration struct { + URL *string `json:"issuerURL,omitempty"` + Audiences []v1.TokenAudience `json:"audiences,omitempty"` + CertificateAuthority *ConfigMapNameReferenceApplyConfiguration `json:"issuerCertificateAuthority,omitempty"` +} + +// TokenIssuerApplyConfiguration constructs an declarative configuration of the TokenIssuer type for use with +// apply. 
+func TokenIssuer() *TokenIssuerApplyConfiguration { + return &TokenIssuerApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *TokenIssuerApplyConfiguration) WithURL(value string) *TokenIssuerApplyConfiguration { + b.URL = &value + return b +} + +// WithAudiences adds the given value to the Audiences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Audiences field. +func (b *TokenIssuerApplyConfiguration) WithAudiences(values ...v1.TokenAudience) *TokenIssuerApplyConfiguration { + for i := range values { + b.Audiences = append(b.Audiences, values[i]) + } + return b +} + +// WithCertificateAuthority sets the CertificateAuthority field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CertificateAuthority field is set to the value of the last call. +func (b *TokenIssuerApplyConfiguration) WithCertificateAuthority(value *ConfigMapNameReferenceApplyConfiguration) *TokenIssuerApplyConfiguration { + b.CertificateAuthority = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go new file mode 100644 index 0000000000..f7ae34d017 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenRequiredClaimApplyConfiguration represents an declarative configuration of the TokenRequiredClaim type for use +// with apply. +type TokenRequiredClaimApplyConfiguration struct { + Claim *string `json:"claim,omitempty"` + RequiredValue *string `json:"requiredValue,omitempty"` +} + +// TokenRequiredClaimApplyConfiguration constructs an declarative configuration of the TokenRequiredClaim type for use with +// apply. +func TokenRequiredClaim() *TokenRequiredClaimApplyConfiguration { + return &TokenRequiredClaimApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *TokenRequiredClaimApplyConfiguration) WithClaim(value string) *TokenRequiredClaimApplyConfiguration { + b.Claim = &value + return b +} + +// WithRequiredValue sets the RequiredValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequiredValue field is set to the value of the last call. 
+func (b *TokenRequiredClaimApplyConfiguration) WithRequiredValue(value string) *TokenRequiredClaimApplyConfiguration { + b.RequiredValue = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go new file mode 100644 index 0000000000..641fb48b28 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// UsernameClaimMappingApplyConfiguration represents an declarative configuration of the UsernameClaimMapping type for use +// with apply. +type UsernameClaimMappingApplyConfiguration struct { + TokenClaimMappingApplyConfiguration `json:",inline"` + PrefixPolicy *configv1.UsernamePrefixPolicy `json:"prefixPolicy,omitempty"` + Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"` +} + +// UsernameClaimMappingApplyConfiguration constructs an declarative configuration of the UsernameClaimMapping type for use with +// apply. +func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration { + return &UsernameClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithClaim(value string) *UsernameClaimMappingApplyConfiguration { + b.Claim = &value + return b +} + +// WithPrefixPolicy sets the PrefixPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrefixPolicy field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithPrefixPolicy(value configv1.UsernamePrefixPolicy) *UsernameClaimMappingApplyConfiguration { + b.PrefixPolicy = &value + return b +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithPrefix(value *UsernamePrefixApplyConfiguration) *UsernameClaimMappingApplyConfiguration { + b.Prefix = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go new file mode 100644 index 0000000000..b95bc9ba64 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// UsernamePrefixApplyConfiguration represents an declarative configuration of the UsernamePrefix type for use +// with apply. +type UsernamePrefixApplyConfiguration struct { + PrefixString *string `json:"prefixString,omitempty"` +} + +// UsernamePrefixApplyConfiguration constructs an declarative configuration of the UsernamePrefix type for use with +// apply. 
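// --- Illustrative sketch (not generated, not part of the vendored diff) ---
// The token-claim apply-configuration builders vendored above are meant to be
// chained. A minimal composition, assuming placeholder issuer URL, audience
// and claim values, might look like this:

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// oidcIssuerAndMappings builds TokenIssuer and TokenClaimMappings apply
// configurations using the generated With* setters; each setter records the
// field and returns the receiver for chaining.
func oidcIssuerAndMappings() (*applyconfigv1.TokenIssuerApplyConfiguration, *applyconfigv1.TokenClaimMappingsApplyConfiguration) {
	issuer := applyconfigv1.TokenIssuer().
		WithURL("https://issuer.example.com").
		WithAudiences(configv1.TokenAudience("openshift"))

	mappings := applyconfigv1.TokenClaimMappings().
		WithUsername(applyconfigv1.UsernameClaimMapping().
			WithClaim("email").
			WithPrefix(applyconfigv1.UsernamePrefix().WithPrefixString("oidc:")))

	return issuer, mappings
}
// --- end sketch ---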
+func UsernamePrefix() *UsernamePrefixApplyConfiguration { + return &UsernamePrefixApplyConfiguration{} +} + +// WithPrefixString sets the PrefixString field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrefixString field is set to the value of the last call. +func (b *UsernamePrefixApplyConfiguration) WithPrefixString(value string) *UsernamePrefixApplyConfiguration { + b.PrefixString = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go index 1e1d65169a..a16213812e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go @@ -11,6 +11,7 @@ type VSpherePlatformTopologyApplyConfiguration struct { Datastore *string `json:"datastore,omitempty"` ResourcePool *string `json:"resourcePool,omitempty"` Folder *string `json:"folder,omitempty"` + Template *string `json:"template,omitempty"` } // VSpherePlatformTopologyApplyConfiguration constructs an declarative configuration of the VSpherePlatformTopology type for use with @@ -68,3 +69,11 @@ func (b *VSpherePlatformTopologyApplyConfiguration) WithFolder(value string) *VS b.Folder = &value return b } + +// WithTemplate sets the Template field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Template field is set to the value of the last call. 
+func (b *VSpherePlatformTopologyApplyConfiguration) WithTemplate(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Template = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index ae75a55084..57a6ee38af 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -270,6 +270,14 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.ConfigMapNameReference default: {} + - name: oidcProviders + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OIDCProvider + elementRelationship: associative + keys: + - name - name: serviceAccountIssuer type: scalar: string @@ -2171,6 +2179,27 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.SecretNameReference default: {} +- name: com.github.openshift.api.config.v1.OIDCProvider + map: + fields: + - name: claimMappings + type: + namedType: com.github.openshift.api.config.v1.TokenClaimMappings + default: {} + - name: claimValidationRules + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.TokenClaimValidationRule + elementRelationship: atomic + - name: issuer + type: + namedType: com.github.openshift.api.config.v1.TokenIssuer + default: {} + - name: name + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.ObjectReference map: fields: @@ -2565,6 +2594,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.PrefixedClaimMapping + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: prefix + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.Project map: fields: @@ -2905,6 +2945,27 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.TokenClaimMappings + map: + fields: + - name: groups + type: + namedType: com.github.openshift.api.config.v1.PrefixedClaimMapping + default: {} + - name: username + type: + namedType: com.github.openshift.api.config.v1.UsernameClaimMapping + default: {} +- name: com.github.openshift.api.config.v1.TokenClaimValidationRule + map: + fields: + - name: requiredClaim + type: + namedType: com.github.openshift.api.config.v1.TokenRequiredClaim + - name: type + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.TokenConfig map: fields: @@ -2917,6 +2978,34 @@ var schemaYAML = typed.YAMLObject(`types: - name: accessTokenMaxAgeSeconds type: scalar: numeric +- name: com.github.openshift.api.config.v1.TokenIssuer + map: + fields: + - name: audiences + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: issuerCertificateAuthority + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: issuerURL + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TokenRequiredClaim + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: requiredValue + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.Update map: fields: @@ -2965,6 +3054,27 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string 
default: "" +- name: com.github.openshift.api.config.v1.UsernameClaimMapping + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: prefix + type: + namedType: com.github.openshift.api.config.v1.UsernamePrefix + - name: prefixPolicy + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.UsernamePrefix + map: + fields: + - name: prefixString + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec map: fields: @@ -3101,6 +3211,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: resourcePool type: scalar: string + - name: template + type: + scalar: string - name: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec map: fields: diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go index d2a687f0fe..fe3397900a 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go @@ -73,6 +73,239 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MCOObjectReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNode + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeStatus + default: {} +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeSpec + map: + fields: + - name: configVersion + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeSpecMachineConfigVersion + default: {} + - name: node + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MCOObjectReference + default: {} + - name: pool + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MCOObjectReference + default: {} +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeSpecMachineConfigVersion + map: + fields: + - name: desired + type: + scalar: string + default: "" +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: configVersion + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeStatusMachineConfigVersion + default: {} + - name: observedGeneration + type: + scalar: numeric +- name: com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNodeStatusMachineConfigVersion + map: + fields: + - name: current + type: + scalar: string + default: "" + - name: desired + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + map: + fields: + - name: 
lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + default: "" + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped - name: __untyped_atomic_ scalar: untyped list: diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignode.go new file mode 100644 index 0000000000..2f3cee6923 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignode.go @@ -0,0 +1,240 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigNodeApplyConfiguration represents an declarative configuration of the MachineConfigNode type for use +// with apply. +type MachineConfigNodeApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineConfigNodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineConfigNodeStatusApplyConfiguration `json:"status,omitempty"` +} + +// MachineConfigNode constructs an declarative configuration of the MachineConfigNode type for use with +// apply. +func MachineConfigNode(name string) *MachineConfigNodeApplyConfiguration { + b := &MachineConfigNodeApplyConfiguration{} + b.WithName(name) + b.WithKind("MachineConfigNode") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b +} + +// ExtractMachineConfigNode extracts the applied configuration owned by fieldManager from +// machineConfigNode. If no managedFields are found in machineConfigNode for fieldManager, a +// MachineConfigNodeApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// machineConfigNode must be a unmodified MachineConfigNode API object that was retrieved from the Kubernetes API. +// ExtractMachineConfigNode provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMachineConfigNode(machineConfigNode *machineconfigurationv1alpha1.MachineConfigNode, fieldManager string) (*MachineConfigNodeApplyConfiguration, error) { + return extractMachineConfigNode(machineConfigNode, fieldManager, "") +} + +// ExtractMachineConfigNodeStatus is the same as ExtractMachineConfigNode except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMachineConfigNodeStatus(machineConfigNode *machineconfigurationv1alpha1.MachineConfigNode, fieldManager string) (*MachineConfigNodeApplyConfiguration, error) { + return extractMachineConfigNode(machineConfigNode, fieldManager, "status") +} + +func extractMachineConfigNode(machineConfigNode *machineconfigurationv1alpha1.MachineConfigNode, fieldManager string, subresource string) (*MachineConfigNodeApplyConfiguration, error) { + b := &MachineConfigNodeApplyConfiguration{} + err := managedfields.ExtractInto(machineConfigNode, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1alpha1.MachineConfigNode"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(machineConfigNode.Name) + + b.WithKind("MachineConfigNode") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithKind(value string) *MachineConfigNodeApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithAPIVersion(value string) *MachineConfigNodeApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithName(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithGenerateName(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithNamespace(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *MachineConfigNodeApplyConfiguration) WithUID(value types.UID) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithResourceVersion(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithGeneration(value int64) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *MachineConfigNodeApplyConfiguration) WithLabels(entries map[string]string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MachineConfigNodeApplyConfiguration) WithAnnotations(entries map[string]string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MachineConfigNodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MachineConfigNodeApplyConfiguration) WithFinalizers(values ...string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *MachineConfigNodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithSpec(value *MachineConfigNodeSpecApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *MachineConfigNodeApplyConfiguration) WithStatus(value *MachineConfigNodeStatusApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespec.go new file mode 100644 index 0000000000..ff7a6429dd --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespec.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MachineConfigNodeSpecApplyConfiguration represents an declarative configuration of the MachineConfigNodeSpec type for use +// with apply. +type MachineConfigNodeSpecApplyConfiguration struct { + Node *MCOObjectReferenceApplyConfiguration `json:"node,omitempty"` + Pool *MCOObjectReferenceApplyConfiguration `json:"pool,omitempty"` + ConfigVersion *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration `json:"configVersion,omitempty"` +} + +// MachineConfigNodeSpecApplyConfiguration constructs an declarative configuration of the MachineConfigNodeSpec type for use with +// apply. +func MachineConfigNodeSpec() *MachineConfigNodeSpecApplyConfiguration { + return &MachineConfigNodeSpecApplyConfiguration{} +} + +// WithNode sets the Node field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Node field is set to the value of the last call. +func (b *MachineConfigNodeSpecApplyConfiguration) WithNode(value *MCOObjectReferenceApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.Node = value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *MachineConfigNodeSpecApplyConfiguration) WithPool(value *MCOObjectReferenceApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithConfigVersion sets the ConfigVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigVersion field is set to the value of the last call. +func (b *MachineConfigNodeSpecApplyConfiguration) WithConfigVersion(value *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.ConfigVersion = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespecmachineconfigversion.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespecmachineconfigversion.go new file mode 100644 index 0000000000..f3190fa89a --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodespecmachineconfigversion.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
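// --- Illustrative sketch (not generated, not part of the vendored diff) ---
// The MachineConfigNode apply-configuration builders above compose the same
// way. The node, pool and rendered-config names below are placeholders.

package example

import (
	mcnac "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1"
)

// machineConfigNodeApplyConfig builds a MachineConfigNode apply configuration
// whose spec references the node, its pool, and the desired rendered config,
// suitable for server-side apply.
func machineConfigNodeApplyConfig(nodeName, poolName, desiredConfig string) *mcnac.MachineConfigNodeApplyConfiguration {
	return mcnac.MachineConfigNode(nodeName).
		WithSpec(mcnac.MachineConfigNodeSpec().
			WithNode(mcnac.MCOObjectReference().WithName(nodeName)).
			WithPool(mcnac.MCOObjectReference().WithName(poolName)).
			WithConfigVersion(mcnac.MachineConfigNodeSpecMachineConfigVersion().
				WithDesired(desiredConfig)))
}
// --- end sketch ---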
+ +package v1alpha1 + +// MachineConfigNodeSpecMachineConfigVersionApplyConfiguration represents an declarative configuration of the MachineConfigNodeSpecMachineConfigVersion type for use +// with apply. +type MachineConfigNodeSpecMachineConfigVersionApplyConfiguration struct { + Desired *string `json:"desired,omitempty"` +} + +// MachineConfigNodeSpecMachineConfigVersionApplyConfiguration constructs an declarative configuration of the MachineConfigNodeSpecMachineConfigVersion type for use with +// apply. +func MachineConfigNodeSpecMachineConfigVersion() *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration { + return &MachineConfigNodeSpecMachineConfigVersionApplyConfiguration{} +} + +// WithDesired sets the Desired field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Desired field is set to the value of the last call. +func (b *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration) WithDesired(value string) *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration { + b.Desired = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatus.go new file mode 100644 index 0000000000..188c0352f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatus.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MachineConfigNodeStatusApplyConfiguration represents an declarative configuration of the MachineConfigNodeStatus type for use +// with apply. +type MachineConfigNodeStatusApplyConfiguration struct { + Conditions []v1.Condition `json:"conditions,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ConfigVersion *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration `json:"configVersion,omitempty"` +} + +// MachineConfigNodeStatusApplyConfiguration constructs an declarative configuration of the MachineConfigNodeStatus type for use with +// apply. +func MachineConfigNodeStatus() *MachineConfigNodeStatusApplyConfiguration { + return &MachineConfigNodeStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MachineConfigNodeStatusApplyConfiguration) WithConditions(values ...v1.Condition) *MachineConfigNodeStatusApplyConfiguration { + for i := range values { + b.Conditions = append(b.Conditions, values[i]) + } + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *MachineConfigNodeStatusApplyConfiguration) WithObservedGeneration(value int64) *MachineConfigNodeStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithConfigVersion sets the ConfigVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigVersion field is set to the value of the last call. +func (b *MachineConfigNodeStatusApplyConfiguration) WithConfigVersion(value *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) *MachineConfigNodeStatusApplyConfiguration { + b.ConfigVersion = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatusmachineconfigversion.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatusmachineconfigversion.go new file mode 100644 index 0000000000..05b8110ed6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/machineconfignodestatusmachineconfigversion.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MachineConfigNodeStatusMachineConfigVersionApplyConfiguration represents an declarative configuration of the MachineConfigNodeStatusMachineConfigVersion type for use +// with apply. +type MachineConfigNodeStatusMachineConfigVersionApplyConfiguration struct { + Current *string `json:"current,omitempty"` + Desired *string `json:"desired,omitempty"` +} + +// MachineConfigNodeStatusMachineConfigVersionApplyConfiguration constructs an declarative configuration of the MachineConfigNodeStatusMachineConfigVersion type for use with +// apply. +func MachineConfigNodeStatusMachineConfigVersion() *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + return &MachineConfigNodeStatusMachineConfigVersionApplyConfiguration{} +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) WithCurrent(value string) *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + b.Current = &value + return b +} + +// WithDesired sets the Desired field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Desired field is set to the value of the last call. 
+func (b *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) WithDesired(value string) *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + b.Desired = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/mcoobjectreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/mcoobjectreference.go new file mode 100644 index 0000000000..7b45ffdf73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/mcoobjectreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MCOObjectReferenceApplyConfiguration represents an declarative configuration of the MCOObjectReference type for use +// with apply. +type MCOObjectReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// MCOObjectReferenceApplyConfiguration constructs an declarative configuration of the MCOObjectReference type for use with +// apply. +func MCOObjectReference() *MCOObjectReferenceApplyConfiguration { + return &MCOObjectReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MCOObjectReferenceApplyConfiguration) WithName(value string) *MCOObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/clientset.go index 28041f8f5b..f58c537850 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/clientset.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/clientset.go @@ -7,6 +7,7 @@ import ( "net/http" machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" + machineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -15,12 +16,14 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface MachineconfigurationV1() machineconfigurationv1.MachineconfigurationV1Interface + MachineconfigurationV1alpha1() machineconfigurationv1alpha1.MachineconfigurationV1alpha1Interface } // Clientset contains the clients for groups. 
type Clientset struct { *discovery.DiscoveryClient - machineconfigurationV1 *machineconfigurationv1.MachineconfigurationV1Client + machineconfigurationV1 *machineconfigurationv1.MachineconfigurationV1Client + machineconfigurationV1alpha1 *machineconfigurationv1alpha1.MachineconfigurationV1alpha1Client } // MachineconfigurationV1 retrieves the MachineconfigurationV1Client @@ -28,6 +31,11 @@ func (c *Clientset) MachineconfigurationV1() machineconfigurationv1.Machineconfi return c.machineconfigurationV1 } +// MachineconfigurationV1alpha1 retrieves the MachineconfigurationV1alpha1Client +func (c *Clientset) MachineconfigurationV1alpha1() machineconfigurationv1alpha1.MachineconfigurationV1alpha1Interface { + return c.machineconfigurationV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -76,6 +84,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.machineconfigurationV1alpha1, err = machineconfigurationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -98,6 +110,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.machineconfigurationV1 = machineconfigurationv1.New(c) + cs.machineconfigurationV1alpha1 = machineconfigurationv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/clientset_generated.go index 6d79f136ab..1eecf8fc6e 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/clientset_generated.go @@ -6,6 +6,8 @@ import ( clientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" fakemachineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake" + machineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" + fakemachineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -67,3 +69,8 @@ var ( func (c *Clientset) MachineconfigurationV1() machineconfigurationv1.MachineconfigurationV1Interface { return &fakemachineconfigurationv1.FakeMachineconfigurationV1{Fake: &c.Fake} } + +// MachineconfigurationV1alpha1 retrieves the MachineconfigurationV1alpha1Client +func (c *Clientset) MachineconfigurationV1alpha1() machineconfigurationv1alpha1.MachineconfigurationV1alpha1Interface { + return &fakemachineconfigurationv1alpha1.FakeMachineconfigurationV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/register.go 
b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/register.go index c49bc90a48..2714f8e4db 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/register.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake/register.go @@ -4,6 +4,7 @@ package fake import ( machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -16,6 +17,7 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ machineconfigurationv1.AddToScheme, + machineconfigurationv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme/register.go index 522eedb9ad..9bda95eacb 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme/register.go @@ -4,6 +4,7 @@ package scheme import ( machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -16,6 +17,7 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ machineconfigurationv1.AddToScheme, + machineconfigurationv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/doc.go new file mode 100644 index 0000000000..93a7ca4e0e --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/doc.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
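// --- Illustrative sketch (not generated, not part of the vendored diff) ---
// The fake clientset gains a MachineconfigurationV1alpha1() group, so unit
// tests can exercise MachineConfigNodes without a live API server. This
// assumes the standard client-gen fake constructor NewSimpleClientset; the
// object names are placeholders.

package example

import (
	"context"
	"testing"

	mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
	"github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMachineConfigNodeFakeClient(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Create a cluster-scoped MachineConfigNode through the fake client.
	mcn := &mcfgv1alpha1.MachineConfigNode{ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}}
	if _, err := client.MachineconfigurationV1alpha1().MachineConfigNodes().Create(context.TODO(), mcn, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create failed: %v", err)
	}

	// Read it back from the fake object tracker.
	got, err := client.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.TODO(), "worker-0", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get failed: %v", err)
	}
	if got.Name != "worker-0" {
		t.Fatalf("unexpected name %q", got.Name)
	}
}
// --- end sketch ---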
+package fake diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfignode.go new file mode 100644 index 0000000000..6a01842243 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfignode.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + machineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMachineConfigNodes implements MachineConfigNodeInterface +type FakeMachineConfigNodes struct { + Fake *FakeMachineconfigurationV1alpha1 +} + +var machineconfignodesResource = v1alpha1.SchemeGroupVersion.WithResource("machineconfignodes") + +var machineconfignodesKind = v1alpha1.SchemeGroupVersion.WithKind("MachineConfigNode") + +// Get takes name of the machineConfigNode, and returns the corresponding machineConfigNode object, and an error if there is any. +func (c *FakeMachineConfigNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MachineConfigNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(machineconfignodesResource, name), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// List takes label and field selectors, and returns the list of MachineConfigNodes that match those selectors. +func (c *FakeMachineConfigNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MachineConfigNodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(machineconfignodesResource, machineconfignodesKind, opts), &v1alpha1.MachineConfigNodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.MachineConfigNodeList{ListMeta: obj.(*v1alpha1.MachineConfigNodeList).ListMeta} + for _, item := range obj.(*v1alpha1.MachineConfigNodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested machineConfigNodes. +func (c *FakeMachineConfigNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(machineconfignodesResource, opts)) +} + +// Create takes the representation of a machineConfigNode and creates it. Returns the server's representation of the machineConfigNode, and an error, if there is any. +func (c *FakeMachineConfigNodes) Create(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.CreateOptions) (result *v1alpha1.MachineConfigNode, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(machineconfignodesResource, machineConfigNode), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// Update takes the representation of a machineConfigNode and updates it. Returns the server's representation of the machineConfigNode, and an error, if there is any. +func (c *FakeMachineConfigNodes) Update(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (result *v1alpha1.MachineConfigNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(machineconfignodesResource, machineConfigNode), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMachineConfigNodes) UpdateStatus(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (*v1alpha1.MachineConfigNode, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(machineconfignodesResource, "status", machineConfigNode), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// Delete takes name of the machineConfigNode and deletes it. Returns an error if one occurs. +func (c *FakeMachineConfigNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(machineconfignodesResource, name, opts), &v1alpha1.MachineConfigNode{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMachineConfigNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(machineconfignodesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.MachineConfigNodeList{}) + return err +} + +// Patch applies the patch and returns the patched machineConfigNode. +func (c *FakeMachineConfigNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MachineConfigNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(machineconfignodesResource, name, pt, data, subresources...), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied machineConfigNode. +func (c *FakeMachineConfigNodes) Apply(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) { + if machineConfigNode == nil { + return nil, fmt.Errorf("machineConfigNode provided to Apply must not be nil") + } + data, err := json.Marshal(machineConfigNode) + if err != nil { + return nil, err + } + name := machineConfigNode.Name + if name == nil { + return nil, fmt.Errorf("machineConfigNode.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(machineconfignodesResource, *name, types.ApplyPatchType, data), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeMachineConfigNodes) ApplyStatus(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) { + if machineConfigNode == nil { + return nil, fmt.Errorf("machineConfigNode provided to Apply must not be nil") + } + data, err := json.Marshal(machineConfigNode) + if err != nil { + return nil, err + } + name := machineConfigNode.Name + if name == nil { + return nil, fmt.Errorf("machineConfigNode.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(machineconfignodesResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.MachineConfigNode{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MachineConfigNode), err +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfiguration_client.go new file mode 100644 index 0000000000..425ee2c28f --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake/fake_machineconfiguration_client.go @@ -0,0 +1,24 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeMachineconfigurationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeMachineconfigurationV1alpha1) MachineConfigNodes() v1alpha1.MachineConfigNodeInterface { + return &FakeMachineConfigNodes{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeMachineconfigurationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..37de120197 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type MachineConfigNodeExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfignode.go new file mode 100644 index 0000000000..cd4117043a --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfignode.go @@ -0,0 +1,227 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + machineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// MachineConfigNodesGetter has a method to return a MachineConfigNodeInterface. +// A group's client should implement this interface. +type MachineConfigNodesGetter interface { + MachineConfigNodes() MachineConfigNodeInterface +} + +// MachineConfigNodeInterface has methods to work with MachineConfigNode resources. +type MachineConfigNodeInterface interface { + Create(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.CreateOptions) (*v1alpha1.MachineConfigNode, error) + Update(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (*v1alpha1.MachineConfigNode, error) + UpdateStatus(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (*v1alpha1.MachineConfigNode, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.MachineConfigNode, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.MachineConfigNodeList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MachineConfigNode, err error) + Apply(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) + ApplyStatus(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) + MachineConfigNodeExpansion +} + +// machineConfigNodes implements MachineConfigNodeInterface +type machineConfigNodes struct { + client rest.Interface +} + +// newMachineConfigNodes returns a MachineConfigNodes +func newMachineConfigNodes(c *MachineconfigurationV1alpha1Client) *machineConfigNodes { + return &machineConfigNodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the machineConfigNode, and returns the corresponding machineConfigNode object, and an error if there is any. 
+func (c *machineConfigNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MachineConfigNode, err error) { + result = &v1alpha1.MachineConfigNode{} + err = c.client.Get(). + Resource("machineconfignodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MachineConfigNodes that match those selectors. +func (c *machineConfigNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MachineConfigNodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.MachineConfigNodeList{} + err = c.client.Get(). + Resource("machineconfignodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested machineConfigNodes. +func (c *machineConfigNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("machineconfignodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a machineConfigNode and creates it. Returns the server's representation of the machineConfigNode, and an error, if there is any. +func (c *machineConfigNodes) Create(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.CreateOptions) (result *v1alpha1.MachineConfigNode, err error) { + result = &v1alpha1.MachineConfigNode{} + err = c.client.Post(). + Resource("machineconfignodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(machineConfigNode). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a machineConfigNode and updates it. Returns the server's representation of the machineConfigNode, and an error, if there is any. +func (c *machineConfigNodes) Update(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (result *v1alpha1.MachineConfigNode, err error) { + result = &v1alpha1.MachineConfigNode{} + err = c.client.Put(). + Resource("machineconfignodes"). + Name(machineConfigNode.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(machineConfigNode). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *machineConfigNodes) UpdateStatus(ctx context.Context, machineConfigNode *v1alpha1.MachineConfigNode, opts v1.UpdateOptions) (result *v1alpha1.MachineConfigNode, err error) { + result = &v1alpha1.MachineConfigNode{} + err = c.client.Put(). + Resource("machineconfignodes"). + Name(machineConfigNode.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(machineConfigNode). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the machineConfigNode and deletes it. Returns an error if one occurs. +func (c *machineConfigNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("machineconfignodes"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *machineConfigNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("machineconfignodes"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched machineConfigNode. +func (c *machineConfigNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MachineConfigNode, err error) { + result = &v1alpha1.MachineConfigNode{} + err = c.client.Patch(pt). + Resource("machineconfignodes"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied machineConfigNode. +func (c *machineConfigNodes) Apply(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) { + if machineConfigNode == nil { + return nil, fmt.Errorf("machineConfigNode provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(machineConfigNode) + if err != nil { + return nil, err + } + name := machineConfigNode.Name + if name == nil { + return nil, fmt.Errorf("machineConfigNode.Name must be provided to Apply") + } + result = &v1alpha1.MachineConfigNode{} + err = c.client.Patch(types.ApplyPatchType). + Resource("machineconfignodes"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *machineConfigNodes) ApplyStatus(ctx context.Context, machineConfigNode *machineconfigurationv1alpha1.MachineConfigNodeApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MachineConfigNode, err error) { + if machineConfigNode == nil { + return nil, fmt.Errorf("machineConfigNode provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(machineConfigNode) + if err != nil { + return nil, err + } + + name := machineConfigNode.Name + if name == nil { + return nil, fmt.Errorf("machineConfigNode.Name must be provided to Apply") + } + + result = &v1alpha1.MachineConfigNode{} + err = c.client.Patch(types.ApplyPatchType). + Resource("machineconfignodes"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go new file mode 100644 index 0000000000..1b717606f3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type MachineconfigurationV1alpha1Interface interface { + RESTClient() rest.Interface + MachineConfigNodesGetter +} + +// MachineconfigurationV1alpha1Client is used to interact with features provided by the machineconfiguration.openshift.io group. +type MachineconfigurationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MachineconfigurationV1alpha1Client) MachineConfigNodes() MachineConfigNodeInterface { + return newMachineConfigNodes(c) +} + +// NewForConfig creates a new MachineconfigurationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MachineconfigurationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MachineconfigurationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MachineconfigurationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MachineconfigurationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MachineconfigurationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MachineconfigurationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MachineconfigurationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *MachineconfigurationV1alpha1Client { + return &MachineconfigurationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *MachineconfigurationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go index 49d95ad7ee..ce9d5374b0 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go @@ -6,6 +6,7 @@ import ( "fmt" v1 "github.com/openshift/api/machineconfiguration/v1" + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -48,6 +49,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("machineconfigpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigPools().Informer()}, nil + // Group=machineconfiguration.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("machineconfignodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1alpha1().MachineConfigNodes().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/interface.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/interface.go index 6ccd6ae716..3e3e87e205 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/interface.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/interface.go @@ -5,12 +5,15 @@ package machineconfiguration import ( internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" v1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + v1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" ) // Interface provides access to each of this group's versions. type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface } type group struct { @@ -28,3 +31,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/interface.go new file mode 100644 index 0000000000..89a69c9099 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MachineConfigNodes returns a MachineConfigNodeInformer. + MachineConfigNodes() MachineConfigNodeInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MachineConfigNodes returns a MachineConfigNodeInformer. +func (v *version) MachineConfigNodes() MachineConfigNodeInformer { + return &machineConfigNodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go new file mode 100644 index 0000000000..57df7775b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + versioned "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// MachineConfigNodeInformer provides access to a shared informer and lister for +// MachineConfigNodes. +type MachineConfigNodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.MachineConfigNodeLister +} + +type machineConfigNodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMachineConfigNodeInformer constructs a new informer for MachineConfigNode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewMachineConfigNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMachineConfigNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMachineConfigNodeInformer constructs a new informer for MachineConfigNode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMachineConfigNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineConfigNodes().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineConfigNodes().Watch(context.TODO(), options) + }, + }, + &machineconfigurationv1alpha1.MachineConfigNode{}, + resyncPeriod, + indexers, + ) +} + +func (f *machineConfigNodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMachineConfigNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *machineConfigNodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&machineconfigurationv1alpha1.MachineConfigNode{}, f.defaultInformer) +} + +func (f *machineConfigNodeInformer) Lister() v1alpha1.MachineConfigNodeLister { + return v1alpha1.NewMachineConfigNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..4a0ca00a01 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/expansion_generated.go @@ -0,0 +1,7 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// MachineConfigNodeListerExpansion allows custom methods to be added to +// MachineConfigNodeLister. +type MachineConfigNodeListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/machineconfignode.go new file mode 100644 index 0000000000..ab1f2ef044 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1/machineconfignode.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MachineConfigNodeLister helps list MachineConfigNodes. +// All objects returned here must be treated as read-only. 
+type MachineConfigNodeLister interface { + // List lists all MachineConfigNodes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.MachineConfigNode, err error) + // Get retrieves the MachineConfigNode from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.MachineConfigNode, error) + MachineConfigNodeListerExpansion +} + +// machineConfigNodeLister implements the MachineConfigNodeLister interface. +type machineConfigNodeLister struct { + indexer cache.Indexer +} + +// NewMachineConfigNodeLister returns a new MachineConfigNodeLister. +func NewMachineConfigNodeLister(indexer cache.Indexer) MachineConfigNodeLister { + return &machineConfigNodeLister{indexer: indexer} +} + +// List lists all MachineConfigNodes in the indexer. +func (s *machineConfigNodeLister) List(selector labels.Selector) (ret []*v1alpha1.MachineConfigNode, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.MachineConfigNode)) + }) + return ret, err +} + +// Get retrieves the MachineConfigNode from the index for a given name. +func (s *machineConfigNodeLister) Get(name string) (*v1alpha1.MachineConfigNode, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("machineconfignode"), name) + } + return obj.(*v1alpha1.MachineConfigNode), nil +} diff --git a/vendor/github.com/openshift/runtime-utils/pkg/registries/registries.go b/vendor/github.com/openshift/runtime-utils/pkg/registries/registries.go index 7a43e73a99..b4588613ee 100644 --- a/vendor/github.com/openshift/runtime-utils/pkg/registries/registries.go +++ b/vendor/github.com/openshift/runtime-utils/pkg/registries/registries.go @@ -161,16 +161,21 @@ func mergedTagMirrorSets(itmsRules []*apicfgv1.ImageTagMirrorSet) ([]mergedMirro return mergedMirrorSets(tagMirrorSets) } -// mergedDigestMirrorSets processes idmsRules and icspRules and returns a set of mergedMirrorSet, one for each Source value, +// mergedDigestMirrorSets processes idmsRules and returns a set of mergedMirrorSet, one for each Source value, // ordered consistently with the preference order of the individual entries (if possible) // E.g. given mirror sets (B, C) and (A, B), it will combine them into a single (A, B, C) set. 
-func mergedDigestMirrorSets(idmsRules []*apicfgv1.ImageDigestMirrorSet, icspRules []*apioperatorsv1alpha1.ImageContentSourcePolicy) ([]mergedMirrorSet, error) { - mirrorSets := newMirrorSets() +func mergedDigestMirrorSets(idmsRules []*apicfgv1.ImageDigestMirrorSet) ([]mergedMirrorSet, error) { + digestMirrorSets := newMirrorSets() for _, idms := range idmsRules { for _, set := range idms.Spec.ImageDigestMirrors { - mirrorSets.addMirrorSet(set.Source, set.MirrorSourcePolicy, set.Mirrors) + digestMirrorSets.addMirrorSet(set.Source, set.MirrorSourcePolicy, set.Mirrors) } } + return mergedMirrorSets(digestMirrorSets) +} + +func mergedICSPMirrorSets(icspRules []*apioperatorsv1alpha1.ImageContentSourcePolicy) ([]mergedMirrorSet, error) { + repositoryMirrorSets := newMirrorSets() for _, icsp := range icspRules { for _, set := range icsp.Spec.RepositoryDigestMirrors { imgMirrors := []apicfgv1.ImageMirror{} @@ -178,10 +183,10 @@ func mergedDigestMirrorSets(idmsRules []*apicfgv1.ImageDigestMirrorSet, icspRule imgMirrors = append(imgMirrors, apicfgv1.ImageMirror(m)) } // leave MirrorSourcePolicy blank, it will follow the default AllowContactingSource - mirrorSets.addMirrorSet(set.Source, "", imgMirrors) + repositoryMirrorSets.addMirrorSet(set.Source, "", imgMirrors) } } - return mergedMirrorSets(mirrorSets) + return mergedMirrorSets(repositoryMirrorSets) } // mirrorsAdjustedForNestedScope returns mirrors from mirroredScope, updated @@ -233,8 +238,10 @@ func registryScope(reg *sysregistriesv2.Registry) string { // A valid scope is in the form of registry/namespace...[/repo] (can also refer to sysregistriesv2.Registry.Prefix) // NOTE: Validation of wildcard entries is done before EditRegistriesConfig is called in the MCO code. func EditRegistriesConfig(config *sysregistriesv2.V2RegistriesConf, insecureScopes, blockedScopes []string, icspRules []*apioperatorsv1alpha1.ImageContentSourcePolicy, - idmsRules []*apicfgv1.ImageDigestMirrorSet, itmsRules []*apicfgv1.ImageTagMirrorSet, -) error { + idmsRules []*apicfgv1.ImageDigestMirrorSet, itmsRules []*apicfgv1.ImageTagMirrorSet) error { + if err := RejectMultiUpdateMirrorSetObjs(icspRules, idmsRules, itmsRules); err != nil { + return err + } // addRegistryEntry creates a Registry object corresponding to scope. // NOTE: The pointer is valid only until the next getRegistryEntry call. addRegistryEntry := func(scope string) *sysregistriesv2.Registry { @@ -276,12 +283,18 @@ func EditRegistriesConfig(config *sysregistriesv2.V2RegistriesConf, insecureScop } } - digestMirrorSets, err := mergedDigestMirrorSets(idmsRules, icspRules) + digestMirrorSets, err := mergedDigestMirrorSets(idmsRules) if err != nil { return err } addMirrorsToRegistries(digestMirrorSets, sysregistriesv2.MirrorByDigestOnly) + icspMirrorSets, err := mergedICSPMirrorSets(icspRules) + if err != nil { + return err + } + addMirrorsToRegistries(icspMirrorSets, sysregistriesv2.MirrorByDigestOnly) + tagMirrorSets, err := mergedTagMirrorSets(itmsRules) if err != nil { return err @@ -328,7 +341,7 @@ func EditRegistriesConfig(config *sysregistriesv2.V2RegistriesConf, insecureScop } } - allMirrorSets := append(digestMirrorSets, tagMirrorSets...) + allMirrorSets := append(icspMirrorSets, append(digestMirrorSets, tagMirrorSets...)...) 
for _, mirrorSet := range allMirrorSets { mirroredReg := getRegistryEntry(mirrorSet.source) mirroredScope := registryScope(mirroredReg) @@ -368,3 +381,16 @@ func IsValidRegistriesConfScope(scope string) bool { } return false } + +// RejectMultiUpdateMirrorSetObjs returns error if icsp objects exist with imagedigestmirrorset or imagetagmirrorset objects +// to avoid existing mirror settings get updated by others +func RejectMultiUpdateMirrorSetObjs(icspRules []*apioperatorsv1alpha1.ImageContentSourcePolicy, + idmsRules []*apicfgv1.ImageDigestMirrorSet, itmsRules []*apicfgv1.ImageTagMirrorSet) error { + if len(icspRules) > 0 && len(idmsRules) > 0 { + return fmt.Errorf("error: both imagecontentsourcepolicy and imagedigestmirrorset exist") + } + if len(icspRules) > 0 && len(itmsRules) > 0 { + return fmt.Errorf("error: both imagecontentsourcepolicy and imagetagmirrorset exist") + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 799d866d51..9887d185b2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 1d9f0d9c90..1b298fa9ac 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -324,7 +324,7 @@ github.com/esimonov/ifshort/pkg/analyzer # github.com/ettle/strcase v0.1.1 ## explicit; go 1.12 github.com/ettle/strcase -# github.com/evanphx/json-patch v4.12.0+incompatible +# github.com/evanphx/json-patch v5.6.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.6.0 @@ -567,7 +567,7 @@ github.com/gostaticanalysis/forcetypeassert # github.com/gostaticanalysis/nilerr v0.1.1 ## explicit; go 1.15 github.com/gostaticanalysis/nilerr -# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 +# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit github.com/gregjones/httpcache # github.com/hashicorp/errwrap v1.1.0 @@ -802,7 +802,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0-rc.3 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20231013202211-096c446e7f60 +# github.com/openshift/api v0.0.0-20231101131954-24085c95a7a2 ## explicit; go 1.20 github.com/openshift/api github.com/openshift/api/apiserver @@ -837,10 +837,12 @@ github.com/openshift/api/machine/v1 github.com/openshift/api/machine/v1alpha1 github.com/openshift/api/machine/v1beta1 github.com/openshift/api/machineconfiguration/v1 +github.com/openshift/api/machineconfiguration/v1alpha1 github.com/openshift/api/monitoring github.com/openshift/api/monitoring/v1alpha1 github.com/openshift/api/network github.com/openshift/api/network/v1 +github.com/openshift/api/network/v1alpha1 github.com/openshift/api/networkoperator github.com/openshift/api/networkoperator/v1 github.com/openshift/api/oauth @@ -873,7 +875,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46 +# github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba ## 
explicit; go 1.20 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal @@ -911,16 +913,21 @@ github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1 +github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1 github.com/openshift/client-go/machineconfiguration/clientset/versioned github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1 github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake +github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1 +github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/fake github.com/openshift/client-go/machineconfiguration/informers/externalversions github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1 +github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1 github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1 +github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1 @@ -938,10 +945,10 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1 github.com/openshift/client-go/operator/listers/operator/v1 github.com/openshift/client-go/operator/listers/operator/v1alpha1 -# github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20230516205036-088c6d48cc1a +# github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231110142214-403ea8439974 ## explicit; go 1.20 github.com/openshift/cluster-config-operator/pkg/operator/featuregates -# github.com/openshift/library-go v0.0.0-20231017173800-126f85ed0cc7 +# github.com/openshift/library-go v0.0.0-20231020125034-5a2d9fe760b3 ## explicit; go 1.20 github.com/openshift/library-go/pkg/cloudprovider github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers @@ -961,7 +968,7 @@ github.com/openshift/library-go/pkg/operator/resource/resourceread github.com/openshift/library-go/pkg/operator/resourcesynccontroller github.com/openshift/library-go/pkg/operator/status github.com/openshift/library-go/pkg/operator/v1helpers -# github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b +# github.com/openshift/runtime-utils v0.0.0-20220926190846-5c488b20a19f ## explicit; go 1.18 github.com/openshift/runtime-utils/pkg/registries # github.com/pelletier/go-toml/v2 v2.0.8 @@ -2052,7 +2059,7 @@ 
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +# k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f => github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 ## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder3/util @@ -2236,3 +2243,4 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml +# k8s.io/kube-openapi => github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0
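
Reviewer note (not part of the patch): the vendored typed client, informer, and lister added above are standard client-gen/informer-gen output for the new MachineConfigNode v1alpha1 API. As a rough sketch of how consuming code might use them, assuming an in-cluster REST config and a 30-minute resync period chosen purely for illustration:

// Illustrative only: not part of this patch. Package paths come from the vendored
// code above; the resync period and in-cluster config are assumptions.
package main

import (
	"context"
	"fmt"
	"time"

	versioned "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
	externalversions "github.com/openshift/client-go/machineconfiguration/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"
)

func main() {
	// Assume the process runs in-cluster; out-of-cluster setups would build the
	// config with clientcmd instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Direct access through the generated v1alpha1 typed client.
	ctx := context.Background()
	mcns, err := client.MachineconfigurationV1alpha1().MachineConfigNodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d MachineConfigNodes via the typed client\n", len(mcns.Items))

	// Shared informer access: prefer the factory so the cache and watch connection
	// are shared, as the generated informer comments recommend.
	factory := externalversions.NewSharedInformerFactory(client, 30*time.Minute)
	mcnInformer := factory.Machineconfiguration().V1alpha1().MachineConfigNodes()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Read from the lister backed by the informer's indexer (cluster-scoped, keyed by name).
	cached, err := mcnInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("lister returned %d cached MachineConfigNodes\n", len(cached))
}

The design choice mirrors the other vendored groups in this repository: callers go through the shared informer factory rather than constructing independent informers, which keeps one watch per resource type.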
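
Likewise, the new fake client wires MachineConfigNodes into the clientset's in-memory object tracker, so unit tests can exercise v1alpha1 code paths without an API server. A minimal sketch, with the object name invented for the example:

// Illustrative only: not part of this patch. fake.NewSimpleClientset is the standard
// client-gen entry point in the vendored fake package; "node-0" is a made-up name.
package example

import (
	"context"
	"testing"

	mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
	fakeclient "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMachineConfigNodeFakeClient(t *testing.T) {
	// Seed the fake clientset with one MachineConfigNode; reads and writes go through
	// the in-memory tracker rather than a real API server.
	seed := &mcfgv1alpha1.MachineConfigNode{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}}
	client := fakeclient.NewSimpleClientset(seed)

	got, err := client.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.Background(), "node-0", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "node-0" {
		t.Fatalf("expected node-0, got %q", got.Name)
	}
}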
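
For the runtime-utils change, ICSP merging is split out of mergedDigestMirrorSets into mergedICSPMirrorSets, and EditRegistriesConfig now calls RejectMultiUpdateMirrorSetObjs up front, so legacy ImageContentSourcePolicy objects can no longer be mixed with ImageDigestMirrorSet or ImageTagMirrorSet objects. A hedged sketch of the guard's behavior, with registry names made up for the example:

// Illustrative only: not part of this patch. Shows the new guard rejecting a cluster
// that carries both an ICSP and an IDMS; the registry names are invented.
package main

import (
	"fmt"

	apicfgv1 "github.com/openshift/api/config/v1"
	apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1"
	"github.com/openshift/runtime-utils/pkg/registries"
)

func main() {
	icsp := &apioperatorsv1alpha1.ImageContentSourcePolicy{
		Spec: apioperatorsv1alpha1.ImageContentSourcePolicySpec{
			RepositoryDigestMirrors: []apioperatorsv1alpha1.RepositoryDigestMirrors{
				{Source: "registry.example.com/app", Mirrors: []string{"mirror.example.com/app"}},
			},
		},
	}
	idms := &apicfgv1.ImageDigestMirrorSet{
		Spec: apicfgv1.ImageDigestMirrorSetSpec{
			ImageDigestMirrors: []apicfgv1.ImageDigestMirrors{
				{Source: "registry.example.com/app", Mirrors: []apicfgv1.ImageMirror{"mirror.example.com/app"}},
			},
		},
	}

	// Both kinds present at once: the guard returns an error instead of silently
	// merging them, nudging operators to migrate ICSP to IDMS/ITMS first.
	err := registries.RejectMultiUpdateMirrorSetObjs(
		[]*apioperatorsv1alpha1.ImageContentSourcePolicy{icsp},
		[]*apicfgv1.ImageDigestMirrorSet{idms},
		nil,
	)
	fmt.Println(err) // error: both imagecontentsourcepolicy and imagedigestmirrorset exist
}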